id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
27,900
hsw_client_ratios.py
andikleen_pmu-tools/hsw_client_ratios.py
# -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel 4th gen Core (code named Haswell)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#

# Helpers -- these module globals are overwritten by the toplev driver at runtime.
print_error = lambda msg: False
smt_enabled = False
ebs_mode = False
version = "4.8-full-perf"
base_frequency = -1.0
Memory = 0
Average_Frequency = 0.0
num_cores = 1
num_threads = 1
num_sockets = 1


def handle_error(obj, msg):
    """Record a computation error on a tree node and neutralize its value/threshold."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
    obj.thresh = False


def handle_error_metric(obj, msg):
    """Record a computation error on a plain metric (metrics have no threshold to clear)."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0


# Constants
Exe_Ports = 8
Mem_L2_Store_Cost = 9
Mem_L3_Weight = 7
Mem_STLB_Hit_Cost = 8
BAClear_Cost = 12
MS_Switches_Cost = 2
Avg_Assist_Cost = 66
Pipeline_Width = 4
OneMillion = 1000000
OneBillion = 1000000000
Energy_Unit = 61
EBS_Mode = 0
DS = 0


# Aux. formulas

def Backend_Bound_Cycles(self, EV, level):
    """Estimated cycles the Backend was stalled (SMT-aware)."""
    if smt_enabled:
        return (STALLS_TOTAL(self, EV, level) +
                (EV("UOPS_EXECUTED.CORE:c1", level) - Few_Uops_Executed_Threshold(self, EV, level)) / 2 -
                Frontend_RS_Empty_Cycles(self, EV, level) + EV("RESOURCE_STALLS.SB", level))
    return (STALLS_TOTAL(self, EV, level) +
            EV("UOPS_EXECUTED.CORE:c1", level) - Few_Uops_Executed_Threshold(self, EV, level) -
            Frontend_RS_Empty_Cycles(self, EV, level) + EV("RESOURCE_STALLS.SB", level))

def Cycles_0_Ports_Utilized(self, EV, level):
    """Cycles with no uop executed on any port."""
    if smt_enabled:
        return (EV("UOPS_EXECUTED.CORE:i1:c1", level)) / 2
    return STALLS_TOTAL(self, EV, level) - Frontend_RS_Empty_Cycles(self, EV, level)

def Cycles_1_Port_Utilized(self, EV, level):
    """Cycles where exactly one execution port was utilized."""
    delta = EV("UOPS_EXECUTED.CORE:c1", level) - EV("UOPS_EXECUTED.CORE:c2", level)
    return delta / 2 if smt_enabled else delta

def Cycles_2_Ports_Utilized(self, EV, level):
    """Cycles where exactly two execution ports were utilized."""
    delta = EV("UOPS_EXECUTED.CORE:c2", level) - EV("UOPS_EXECUTED.CORE:c3", level)
    return delta / 2 if smt_enabled else delta

def Cycles_3m_Ports_Utilized(self, EV, level):
    """Cycles where three or more execution ports were utilized."""
    busy = EV("UOPS_EXECUTED.CORE:c3", level)
    return busy / 2 if smt_enabled else busy

def DurationTimeInSeconds(self, EV, level):
    return EV("interval-ms", 0) / 1000

def Execute_Cycles(self, EV, level):
    cycles = EV("UOPS_EXECUTED.CORE:c1", level)
    return cycles / 2 if smt_enabled else cycles

def Fetched_Uops(self, EV, level):
    return (EV("IDQ.DSB_UOPS", level) + EV("LSD.UOPS", level) +
            EV("IDQ.MITE_UOPS", level) + EV("IDQ.MS_UOPS", level))

def Few_Uops_Executed_Threshold(self, EV, level):
    # Touch both cmask flavors first so the driver always schedules both events,
    # then select the threshold event based on the measured IPC.
    EV("UOPS_EXECUTED.CORE:c2", level)
    EV("UOPS_EXECUTED.CORE:c3", level)
    if IPC(self, EV, level) > 1.8:
        return EV("UOPS_EXECUTED.CORE:c3", level)
    return EV("UOPS_EXECUTED.CORE:c2", level)

def Frontend_RS_Empty_Cycles(self, EV, level):
    # Touch the event unconditionally so it is always collected.
    EV("RS_EVENTS.EMPTY_CYCLES", level)
    if self.Fetch_Latency.compute(EV) > 0.1:
        return EV("RS_EVENTS.EMPTY_CYCLES", level)
    return 0

def Frontend_Latency_Cycles(self, EV, level):
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", level)), level)

def HighIPC(self, EV, level):
    val = IPC(self, EV, level) / Pipeline_Width
    return val

def ITLB_Miss_Cycles(self, EV, level):
    return (14 * EV("ITLB_MISSES.STLB_HIT", level) + EV("ITLB_MISSES.WALK_DURATION", level))

def LOAD_L1_MISS(self, EV, level):
    return (EV("MEM_LOAD_UOPS_RETIRED.L2_HIT", level) +
            EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) +
            EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT", level) +
            EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM", level) +
            EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS", level))

def LOAD_L1_MISS_NET(self, EV, level):
    return LOAD_L1_MISS(self, EV, level) + EV("MEM_LOAD_UOPS_RETIRED.L3_MISS", level)

def LOAD_L3_HIT(self, EV, level):
    # Scale L3 hits by the fill-buffer (LFB) hit ratio.
    return EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))

def LOAD_XSNP_HIT(self, EV, level):
    return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))

def LOAD_XSNP_HITM(self, EV, level):
    return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))

def LOAD_XSNP_MISS(self, EV, level):
    return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))

def Mem_L3_Hit_Fraction(self, EV, level):
    return EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) / (EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) + Mem_L3_Weight * EV("MEM_LOAD_UOPS_RETIRED.L3_MISS", level))

def Mem_Lock_St_Fraction(self, EV, level):
    return EV("MEM_UOPS_RETIRED.LOCK_LOADS", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level)

def Memory_Bound_Fraction(self, EV, level):
    return (STALLS_MEM_ANY(self, EV, level) + EV("RESOURCE_STALLS.SB", level)) / Backend_Bound_Cycles(self, EV, level)

def Mispred_Clears_Fraction(self, EV, level):
    return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))

def ORO_Demand_RFO_C1(self, EV, level):
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)), level)

def ORO_DRD_Any_Cycles(self, EV, level):
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)), level)

def ORO_DRD_BW_Cycles(self, EV, level):
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c6", level)), level)

def SQ_Full_Cycles(self, EV, level):
    full = EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level)
    return full / 2 if smt_enabled else full

def STALLS_MEM_ANY(self, EV, level):
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("CYCLE_ACTIVITY.STALLS_LDM_PENDING", level)), level)

def STALLS_TOTAL(self, EV, level):
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("CYCLE_ACTIVITY.CYCLES_NO_EXECUTE", level)), level)

def Store_L2_Hit_Cycles(self, EV, level):
    return EV("L2_RQSTS.RFO_HIT", level) * Mem_L2_Store_Cost * (1 - Mem_Lock_St_Fraction(self, EV, level))

def Mem_XSNP_HitM_Cost(self, EV, level):
    return 60

def Mem_XSNP_Hit_Cost(self, EV, level):
    return 43

def Mem_XSNP_None_Cost(self, EV, level):
    return 29

def Recovery_Cycles(self, EV, level):
    if smt_enabled:
        return EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2
    return EV("INT_MISC.RECOVERY_CYCLES", level)

def Retire_Fraction(self, EV, level):
    return Retired_Slots(self, EV, level) / EV("UOPS_ISSUED.ANY", level)

# Retired slots per Logical Processor
def Retired_Slots(self, EV, level):
    return EV("UOPS_RETIRED.RETIRE_SLOTS", level)

# Number of logical processors (enabled or online) on the target system
def Num_CPUs(self, EV, level):
    return 8 if smt_enabled else 4

# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)

# Uops Per Instruction
def UopPI(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
    self.thresh = (val > 1.05)
    return val

# Uops per taken branch
def UpTB(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 1.5
    return val

# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
    return 1 / IPC(self, EV, level)

# Per-Logical Processor actual clocks when the Logical Processor is active.
def CLKS(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD", level)

# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
    return Pipeline_Width * CORE_CLKS(self, EV, level)

# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)

# Instruction-Level-Parallelism (average number of uops executed
# when there is execution) per thread (logical-processor)
def ILP(self, EV, level):
    if smt_enabled:
        return EV("UOPS_EXECUTED.CORE", level) / 2 / Execute_Cycles(self, EV, level)
    return EV("UOPS_EXECUTED.CORE", level) / Execute_Cycles(self, EV, level)

# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
    if ebs_mode:
        return (EV("CPU_CLK_UNHALTED.THREAD", level) / 2) * (1 + EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_XCLK", level))
    if smt_enabled:
        return EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2
    return CLKS(self, EV, level)
# Instructions per Load (lower number means higher occurrence rate).
# Tip: reduce memory accesses.
# Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level)
    self.thresh = (val < 3)
    return val

# Instructions per Store (lower number means higher occurrence rate).
# Tip: reduce memory accesses.
# Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level)
    self.thresh = (val < 8)
    return val

# Instructions per Branch (lower number means higher occurrence rate)
def IpBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 8)
    return val

# Instructions per (near) call (lower number means higher occurrence rate)
def IpCall(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
    self.thresh = (val < 200)
    return val

# Instructions per taken branch
def IpTB(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 2 + 1
    return val

# Branch instructions per taken branch.
# Can be used to approximate PGO-likelihood for non-loopy codes.
def BpTkBranch(self, EV, level):
    return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)

# Total number of retired Instructions
def Instructions(self, EV, level):
    return EV("INST_RETIRED.ANY", level)

# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
    return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level)

# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache).
# See section 'Decoded ICache' in Optimization Manual:
# http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val

# Instructions per speculative Unknown Branch Misprediction (BAClear)
# (lower number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
    return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)

# Number of Instructions per non-speculative Branch Misprediction (JEClear)
# (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 200)
    return val

# Instructions per retired Mispredicts for indirect CALL or JMP branches
# (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    val = Instructions(self, EV, level) / (Retire_Fraction(self, EV, level) * EV("BR_MISP_EXEC.INDIRECT", level))
    self.thresh = (val < 1000)
    return val

# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / (EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level) + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level))

# Memory-Level-Parallelism (average number of L1 miss demand loads when there
# is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)

# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("OFFCORE_REQUESTS.DEMAND_RFO", level) / EV("INST_RETIRED.ANY", level)

# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)

def L1D_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)

def L2_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)

def L3_Cache_Fill_BW(self, EV, level):
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)

# Utilization of the core's Page Walker(s) serving STLB misses triggered by
# instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    val = (EV("ITLB_MISSES.WALK_DURATION", level) +
           EV("DTLB_LOAD_MISSES.WALK_DURATION", level) +
           EV("DTLB_STORE_MISSES.WALK_DURATION", level)) / CORE_CLKS(self, EV, level)
    self.thresh = (val > 0.5)
    return val

# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    return L1D_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    return L2_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    return L3_Cache_Fill_BW(self, EV, level)

# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)

# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)

# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)

# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)

# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)

# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    if smt_enabled:
        return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / (EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2)
    return 0

# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val

# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)
# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    return 64 * (EV("UNC_ARB_TRK_REQUESTS.ALL", level) + EV("UNC_ARB_COH_TRK_REQUESTS.ALL", level)) / OneMillion / Time(self, EV, level) / 1000

# Total package Power in Watts
def Power(self, EV, level):
    return EV("UNC_PKG_ENERGY_STATUS", level) * Energy_Unit / (Time(self, EV, level) * OneMillion)

# Run duration time in seconds
def Time(self, EV, level):
    val = EV("interval-s", 0)
    self.thresh = (val < 1)
    return val

# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    return EV("UNC_CLOCK.SOCKET", level)

# Instructions per Far Branch (Far Branches apply upon transition from
# application to operating system, handling interrupts, exceptions)
# [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = (val < 1000000)
    return val

# Event groups

class Frontend_Bound:
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the
processor's Frontend undersupplies its Backend. Frontend
denotes the first part of the processor core responsible to
fetch operations that are executed later on by the Backend
part. Within the Frontend; a branch predictor predicts the
next address to fetch; cache-lines are fetched from the
memory subsystem; parsed into instructions; and lastly
decoded into micro-operations (uops). Ideally the Frontend
can issue Pipeline_Width uops every cycle to the Backend.
Frontend Bound denotes unutilized issue-slots when there is
no Backend stall; i.e. bubbles where Frontend delivered no
uops while Backend could have accepted them. For example;
stalls due to instruction-cache misses would be categorized
under Frontend Bound."""


class Fetch_Latency:
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['RS_EVENTS.EMPTY_END']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = Pipeline_Width * Frontend_Latency_Cycles(self, EV, 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend latency issues. For example; instruction-
cache misses; iTLB misses or fetch stalls after a branch
misprediction are categorized under Frontend Latency. In
such cases; the Frontend eventually delivers no uops for
some period."""


class ICache_Misses:
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("ICACHE.IFDATA_STALL", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to instruction cache misses.. Using compiler's
Profile-Guided Optimization (PGO) can reduce i-cache misses
through improved hot code layout."""


class ITLB_Misses:
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['ITLB_MISSES.WALK_COMPLETED']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = ITLB_Miss_Cycles(self, EV, 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Instruction TLB (ITLB) misses.. Consider
large 2M pages for code (selectively prefer hot large-size
function, due to limited 2M entries). Linux options:
standard binaries use libhugetlbfs; Hfsort..
https://github.com/libhugetlbfs/libhugetlbfs;https://researc
h.fb.com/publications/optimizing-function-placement-for-
large-scale-data-center-applications-2/"""


class Branch_Resteers:
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = BAClear_Cost * (EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) + EV("MACHINE_CLEARS.COUNT", 3) + EV("BACLEARS.ANY", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers. Branch Resteers estimates
the Frontend delay in fetching operations from corrected
path; following all sorts of miss-predicted branches. For
example; branchy code with lots of miss-predictions might
get categorized under Branch Resteers. Note the value of
this node may overlap with its siblings."""


class MS_Switches:
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0

    def compute(self, EV):
        try:
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU
was stalled due to switches of uop delivery to the Microcode
Sequencer (MS). Commonly used instructions are optimized for
delivery by the DSB (decoded i-cache) or MITE (legacy
instruction decode) pipelines. Certain operations cannot be
handled natively by the execution pipeline; and must be
performed by microcode (small programs injected into the
execution stream). Switching to the MS too often can
negatively impact performance. The MS is designated to
deliver long uop flows required by CISC instructions like
CPUID; or uncommon conditions like Floating Point Assists
when dealing with Denormals."""


class LCP:
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("ILD_STALL.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled
due to Length Changing Prefixes (LCPs). Using proper
compiler flags or Intel Compiler by default will certainly
avoid this."""


class DSB_Switches:
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to switches from DSB to MITE pipelines. The DSB
(decoded i-cache) is a Uop Cache where the front-end
directly delivers Uops (micro operations) avoiding heavy x86
decoding. The DSB pipeline has shorter latency and delivered
higher bandwidth than the MITE (legacy instruction decode
pipeline). Switching between the two pipelines can cause
penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""


class Fetch_Bandwidth:
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend bandwidth issues. For example;
inefficiencies at the instruction decoders; or restrictions
for caching in the DSB (decoded uops cache) are categorized
under Fetch Bandwidth. In such cases; the Frontend typically
delivers suboptimal amount of uops to the Backend."""


class MITE:
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = (EV("IDQ.ALL_MITE_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_MITE_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to the MITE pipeline (the legacy
decode pipeline). This pipeline is used for code that was
not pre-cached in the DSB or LSD. For example;
inefficiencies due to asymmetric decoders; use of long
immediate or LCP can manifest as MITE fetch bandwidth
bottleneck.. Consider tuning codegen of 'small hotspots'
that can fit in DSB. Read about 'Decoded ICache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""


class DSB:
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = (EV("IDQ.ALL_DSB_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_DSB_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to DSB (decoded uop cache) fetch
pipeline. For example; inefficient utilization of the DSB
cache structure or bank conflict when reading from it; are
categorized here."""


class Bad_Speculation:
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = (EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""


class Branch_Mispredicts:
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction. These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""


class Machine_Clears:
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears. These slots are either wasted by uops
fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:. https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""


class Backend_Bound:
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = 1 - (self.Frontend_Bound.compute(EV) + self.Bad_Speculation.compute(EV) + self.Retiring.compute(EV))
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. Backend is the portion of
the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""


class Memory_Bound:
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck. Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""


class L1_Bound:
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L1_HIT:pp', 'MEM_LOAD_UOPS_RETIRED.HIT_LFB:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = max((STALLS_MEM_ANY(self, EV, 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_PENDING", 3)) / CLKS(self, EV, 3), 0)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled without
loads missing the L1 data cache. The L1 data cache typically
has the shortest latency. However; in certain cases like
loads blocked on older stores; a load might suffer due to
high latency even though it is being satisfied by the L1.
Another example is loads who miss in the TLB. These cases
are characterized by execution unit stalls; while some non-
completed demand load lives in the machine without having
that demand load missing the L1 cache."""


class DTLB_Load:
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0

    def compute(self, EV):
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT", 4) + EV("DTLB_LOAD_MISSES.WALK_DURATION", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages). This includes hitting in the second-level TLB (STLB)
as well as performing a hardware page walk on an STLB
miss.."""

# NOTE(review): the original file continues with a Store_Fwd_Blk class whose
# body is truncated at the edge of this chunk (its desc string never closes
# in the visible text); it is intentionally not reproduced here.
To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.""" class Lock_Latency: name = "Lock_Latency" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_UOPS_RETIRED.LOCK_LOADS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['Offcore']) maxval = 1.0 def compute(self, EV): try: self.val = Mem_Lock_St_Fraction(self, EV, 4) * ORO_Demand_RFO_C1(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Lock_Latency zero division") return self.val desc = """ This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them.""" class Split_Loads: name = "Split_Loads" domain = "Clocks_Calculated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_UOPS_RETIRED.SPLIT_LOADS:pp'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = 1.0 def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Split_Loads zero division") return self.val desc = """ This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. . Consider aligning data or hot structure fields. 
See the Optimization Manual for more details""" class G4K_Aliasing: name = "4K_Aliasing" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "G4K_Aliasing zero division") return self.val desc = """ This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).. Consider reducing independent loads/stores accesses with 4K offsets. See the Optimization Manual for more details""" class FB_Full: name = "FB_Full" domain = "Clocks_Calculated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW']) maxval = None def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("L1D_PEND_MISS.REQUEST_FB_FULL:c1", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.3) except ZeroDivisionError: handle_error(self, "FB_Full zero division") return self.val desc = """ This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory).. See $issueBW and $issueSL hints. 
Avoid software prefetches if indeed memory BW limited.""" class L2_Bound: name = "L2_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_UOPS_RETIRED.L2_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = (EV("CYCLE_ACTIVITY.STALLS_L1D_PENDING", 3) - EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3)) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L2_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance.""" class L3_Bound: name = "L3_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_UOPS_RETIRED.L3_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = Mem_L3_Hit_Fraction(self, EV, 3) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L3_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. 
L2 misses/L3 hits) can improve the latency and increase performance.""" class Contested_Accesses: name = "Contested_Accesses" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_XSNP_HitM_Cost(self, EV, 4) * LOAD_XSNP_HITM(self, EV, 4) + Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_MISS(self, EV, 4)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Contested_Accesses zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing.""" class Data_Sharing: name = "Data_Sharing" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_HIT(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Data_Sharing zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. 
Excessive data sharing can drastically harm multithreaded performance.""" class L3_Hit_Latency: name = "L3_Hit_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_UOPS_RETIRED.L3_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat']) maxval = 1.0 def compute(self, EV): try: self.val = Mem_XSNP_None_Cost(self, EV, 4) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L3_Hit_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings.""" class SQ_Full: name = "SQ_Full" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore']) maxval = None def compute(self, EV): try: self.val = SQ_Full_Cycles(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.3) and self.parent.thresh except ZeroDivisionError: handle_error(self, "SQ_Full zero division") return self.val desc = """ This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors).""" class DRAM_Bound: name = "DRAM_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_UOPS_RETIRED.L3_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['MemoryBound', 'TmaL3mem']) maxval = 1.0 def compute(self, EV): try: self.val = (1 - Mem_L3_Hit_Fraction(self, EV, 3)) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.1) and 
self.parent.thresh except ZeroDivisionError: handle_error(self, "DRAM_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance.""" class MEM_Bandwidth: name = "MEM_Bandwidth" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore']) maxval = None def compute(self, EV): try: self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MEM_Bandwidth zero division") return self.val desc = """ This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that).. Improve data accesses to reduce cacheline transfers from/to memory. Examples: 1) Consume all bytes of a each cacheline before it is evicted (e.g. reorder structure elements and split non-hot ones), 2) merge computed-limited with BW-limited loops, 3) NUMA optimizations in multi-socket system. 
Note: software prefetches will not help BW-limited application..""" class MEM_Latency: name = "MEM_Latency" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore']) maxval = None def compute(self, EV): try: self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MEM_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that).. Improve data accesses or interleave them with compute. Examples: 1) Data layout re-structuring, 2) Software Prefetches (also through the compiler)..""" class Store_Bound: name = "Store_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_UOPS_RETIRED.ALL_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset(['MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = EV("RESOURCE_STALLS.SB", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Bound zero division") return self.val desc = """ This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. 
This metric will be flagged should RFO stores be a bottleneck.""" class Store_Latency: name = "Store_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore']) maxval = 1.0 def compute(self, EV): try: self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Consider to avoid/reduce unnecessary (or easily load-able/computable) memory store.""" class False_Sharing: name = "False_Sharing" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM:pp', 'OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "False_Sharing zero division") return self.val desc = """ This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. . 
False Sharing can be easily avoided by padding to make Logical Processors access different lines.""" class Split_Stores: name = "Split_Stores" domain = "Core_Utilization" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_UOPS_RETIRED.SPLIT_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = 2 * EV("MEM_UOPS_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Split_Stores zero division") return self.val desc = """ This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity.""" class DTLB_Store: name = "DTLB_Store" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_UOPS_RETIRED.STLB_MISS_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMT', 'MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT", 4) + EV("DTLB_STORE_MISSES.WALK_DURATION", 4)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DTLB_Store zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. 
Try using larger page sizes for large amounts of frequently- used data.""" class Core_Bound: name = "Core_Bound" domain = "Slots" area = "BE/Core" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Backend', 'TmaL2', 'Compute']) maxval = None def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Core_Bound zero division") return self.val desc = """ This metric represents fraction of slots where Core non- memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).. Tip: consider Port Saturation analysis as next step.""" class Divider: name = "Divider" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = ['ARITH.DIVIDER_UOPS'] errcount = 0 sibling = None metricgroup = frozenset(['BvCB']) maxval = 1.0 def compute(self, EV): try: self.val = 10 * EV("ARITH.DIVIDER_UOPS", 3) / CORE_CLKS(self, EV, 3) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Divider zero division") return self.val desc = """ This metric represents fraction of cycles where the Divider unit was active. 
Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.""" class Ports_Utilization: name = "Ports_Utilization" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = (Backend_Bound_Cycles(self, EV, 3) - EV("RESOURCE_STALLS.SB", 3) - STALLS_MEM_ANY(self, EV, 3)) / CLKS(self, EV, 3) self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilization zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.. Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop.""" class Ports_Utilized_0: name = "Ports_Utilized_0" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_0_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_0 zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). 
Long-latency instructions like divides may contribute to this metric.. Check assembly view and Appendix C in Optimization Manual to find out instructions with say 5 or more cycles latency.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Ports_Utilized_1: name = "Ports_Utilized_1" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_1_Port_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_1 zero division") return self.val desc = """ This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. 
walking a linked list) - looking at the assembly can be helpful.""" class Ports_Utilized_2: name = "Ports_Utilized_2" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_2_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_2 zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto- Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop.""" class Ports_Utilized_3m: name = "Ports_Utilized_3m" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvCB', 'PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_3m_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.4) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_3m zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).""" class ALU_Op_Utilization: name = "ALU_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("UOPS_DISPATCHED_PORT.PORT_0", 5) + EV("UOPS_DISPATCHED_PORT.PORT_1", 5) + EV("UOPS_DISPATCHED_PORT.PORT_5", 5) + EV("UOPS_DISPATCHED_PORT.PORT_6", 5)) / (4 * CORE_CLKS(self, EV, 5)) self.thresh = (self.val > 0.4) 
except ZeroDivisionError: handle_error(self, "ALU_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.""" class Port_0: name = "Port_0" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_0'] errcount = 0 sibling = None metricgroup = frozenset(['Compute']) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_0", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_0 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ALU and 2nd branch""" class Port_1: name = "Port_1" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_1'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_1", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_1 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)""" class Port_5: name = "Port_5" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_5'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_5", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_5 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ALU. See section 'Handling Port 5 Pressure' in Optimization Manual:. 
http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Port_6: name = "Port_6" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_6'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_6", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_6 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 Primary Branch and simple ALU""" class Load_Op_Utilization: name = "Load_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("UOPS_DISPATCHED_PORT.PORT_2", 5) + EV("UOPS_DISPATCHED_PORT.PORT_3", 5) + EV("UOPS_DISPATCHED_PORT.PORT_7", 5) - EV("UOPS_DISPATCHED_PORT.PORT_4", 5)) / (2 * CORE_CLKS(self, EV, 5)) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Load_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations""" class Port_2: name = "Port_2" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_2'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_2", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_2 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 Loads and Store-address""" class Port_3: name = "Port_3" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample 
= ['UOPS_DISPATCHED_PORT.PORT_3'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_3", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_3 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 Loads and Store-address""" class Store_Op_Utilization: name = "Store_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 5) / CORE_CLKS(self, EV, 5) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Store_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations""" class Port_4: name = "Port_4" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_4'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_4 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)""" class Port_7: name = "Port_7" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_7'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_7", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_7 zero division") return self.val desc = """ This metric represents 
Core fraction of cycles CPU dispatched uops on execution port 7 simple Store-address""" class Retiring: name = "Retiring" domain = "Slots" area = "RET" level = 1 htoff = False sample = ['UOPS_RETIRED.RETIRE_SLOTS'] errcount = 0 sibling = None metricgroup = frozenset(['BvUW', 'TmaL1']) maxval = None def compute(self, EV): try: self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh except ZeroDivisionError: handle_error(self, "Retiring zero division") return self.val desc = """ This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. . A high Retiring value for non-vectorized code may be a good hint for programmer to consider vectorizing his code. 
Doing so essentially lets more computations be done without significantly increasing number of instructions thus improving the performance.""" class Light_Operations: name = "Light_Operations" domain = "Slots" area = "RET" level = 2 htoff = False sample = ['INST_RETIRED.PREC_DIST'] errcount = 0 sibling = None metricgroup = frozenset(['Retire', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Light_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. . Focus on techniques that reduce instruction count or result in more efficient instructions generation such as vectorization.""" class Heavy_Operations: name = "Heavy_Operations" domain = "Slots" area = "RET" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Retire', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = self.Microcode_Sequencer.compute(EV) self.thresh = (self.val > 0.1) except ZeroDivisionError: handle_error(self, "Heavy_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. 
This highly-correlates with the uop length of these instructions/sequences.""" class Microcode_Sequencer: name = "Microcode_Sequencer" domain = "Slots" area = "RET" level = 3 htoff = False sample = ['IDQ.MS_UOPS'] errcount = 0 sibling = None metricgroup = frozenset(['MicroSeq']) maxval = None def compute(self, EV): try: self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Microcode_Sequencer zero division") return self.val desc = """ This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided..""" class Assists: name = "Assists" domain = "Slots_Estimated" area = "RET" level = 4 htoff = False sample = ['OTHER_ASSISTS.ANY_WB_ASSIST'] errcount = 0 sibling = None metricgroup = frozenset(['BvIO']) maxval = 1.0 def compute(self, EV): try: self.val = Avg_Assist_Cost * EV("OTHER_ASSISTS.ANY_WB_ASSIST", 4) / SLOTS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Assists zero division") return self.val desc = """ This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. 
Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases.""" class CISC: name = "CISC" domain = "Slots" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV)) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "CISC zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.""" class Metric_IPC: name = "IPC" domain = "Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Ret', 'Summary']) sibling = None def compute(self, EV): try: self.val = IPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IPC zero division") desc = """ Instructions Per Cycle (per Logical Processor)""" class Metric_UopPI: name = "UopPI" domain = "Metric" maxval = 2.0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = UopPI(self, EV, 0) self.thresh = (self.val > 1.05) except ZeroDivisionError: handle_error_metric(self, "UopPI zero division") desc = """ Uops Per Instruction""" class Metric_UpTB: name = "UpTB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = UpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 1.5 except 
ZeroDivisionError: handle_error_metric(self, "UpTB zero division") desc = """ Uops per taken branch""" class Metric_CPI: name = "CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Mem']) sibling = None def compute(self, EV): try: self.val = CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPI zero division") desc = """ Cycles Per Instruction (per Logical Processor)""" class Metric_CLKS: name = "CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline']) sibling = None def compute(self, EV): try: self.val = CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS zero division") desc = """ Per-Logical Processor actual clocks when the Logical Processor is active.""" class Metric_SLOTS: name = "SLOTS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['TmaL1']) sibling = None def compute(self, EV): try: self.val = SLOTS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SLOTS zero division") desc = """ Total issue-pipeline slots (per-Physical Core till ICL; per- Logical Processor ICL onward)""" class Metric_CoreIPC: name = "CoreIPC" domain = "Core_Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Ret', 'SMT', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = CoreIPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CoreIPC zero division") desc = """ Instructions Per Cycle across hyper-threads (per physical core)""" class Metric_ILP: name = "ILP" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Core" metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil']) sibling = None def compute(self, EV): try: self.val = ILP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ILP zero 
division") desc = """ Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical- processor)""" class Metric_CORE_CLKS: name = "CORE_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = CORE_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CORE_CLKS zero division") desc = """ Core actual clocks when any Logical Processor is active on the Physical Core""" class Metric_IpLoad: name = "IpLoad" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpLoad(self, EV, 0) self.thresh = (self.val < 3) except ZeroDivisionError: handle_error_metric(self, "IpLoad zero division") desc = """ Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses.""" class Metric_IpStore: name = "IpStore" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpStore(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpStore zero division") desc = """ Instructions per Store (lower number means higher occurrence rate). 
Tip: reduce memory accesses.""" class Metric_IpBranch: name = "IpBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpBranch(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpBranch zero division") desc = """ Instructions per Branch (lower number means higher occurrence rate)""" class Metric_IpCall: name = "IpCall" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpCall(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpCall zero division") desc = """ Instructions per (near) call (lower number means higher occurrence rate)""" class Metric_IpTB: name = "IpTB" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 2 + 1 except ZeroDivisionError: handle_error_metric(self, "IpTB zero division") desc = """ Instructions per taken branch""" class Metric_BpTkBranch: name = "BpTkBranch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = BpTkBranch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "BpTkBranch zero division") desc = """ Branch instructions per taken branch. . 
Can be used to approximate PGO-likelihood for non-loopy codes.""" class Metric_Instructions: name = "Instructions" domain = "Count" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Summary', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = Instructions(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Instructions zero division") desc = """ Total number of retired Instructions""" class Metric_Retire: name = "Retire" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Pipeline', 'Ret']) sibling = None def compute(self, EV): try: self.val = Retire(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Retire zero division") desc = """ Average number of Uops retired in cycles where at least one uop has retired.""" class Metric_DSB_Coverage: name = "DSB_Coverage" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSB', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Coverage(self, EV, 0) self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1) except ZeroDivisionError: handle_error_metric(self, "DSB_Coverage zero division") desc = """ Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. 
http://www.intel.com/content/www/us/en/architecture- and-technology/64-ia-32-architectures-optimization- manual.html""" class Metric_IpUnknown_Branch: name = "IpUnknown_Branch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed']) sibling = None def compute(self, EV): try: self.val = IpUnknown_Branch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpUnknown_Branch zero division") desc = """ Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)""" class Metric_IpMispredict: name = "IpMispredict" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)""" class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = (self.val < 1000) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc = """ Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).""" class Metric_Load_Miss_Real_Latency: name = "Load_Miss_Real_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat']) sibling = None def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Miss_Real_Latency zero 
division") desc = """ Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)""" class Metric_MLP: name = "MLP" domain = "Metric" maxval = 10.0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MLP zero division") desc = """ Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)""" class Metric_L1MPKI: name = "L1MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI zero division") desc = """ L1 cache true misses per kilo instruction for retired demand loads""" class Metric_L2MPKI: name = "L2MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'Backend', 'CacheHits']) sibling = None def compute(self, EV): try: self.val = L2MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI zero division") desc = """ L2 cache true misses per kilo instruction for retired demand loads""" class Metric_L2MPKI_RFO: name = "L2MPKI_RFO" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheMisses', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_RFO(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_RFO zero division") desc = """ Offcore requests (L2 cache miss) per kilo instruction for demand RFOs""" class Metric_L3MPKI: name = "L3MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = L3MPKI(self, EV, 
0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3MPKI zero division") desc = """ L3 cache true misses per kilo instruction for retired demand loads""" class Metric_L1D_Cache_Fill_BW: name = "L1D_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW zero division") desc = """ """ class Metric_L2_Cache_Fill_BW: name = "L2_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Fill_BW: name = "L3_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW zero division") desc = """ """ class Metric_Page_Walks_Utilization: name = "Page_Walks_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Page_Walks_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Page_Walks_Utilization zero division") desc = """ Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses""" class Metric_L1D_Cache_Fill_BW_2T: name = "L1D_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) 
sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L1 data cache [GB / sec]""" class Metric_L2_Cache_Fill_BW_2T: name = "L2_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L2 cache [GB / sec]""" class Metric_L3_Cache_Fill_BW_2T: name = "L3_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L3 cache [GB / sec]""" class Metric_Load_L2_Miss_Latency: name = "Load_L2_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_Miss_Latency zero division") desc = """ Average Latency for L2 cache miss demand Loads""" class Metric_Load_L2_MLP: name = "Load_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_MLP zero division") desc = 
""" Average Parallel L2 cache miss demand Loads""" class Metric_Data_L2_MLP: name = "Data_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Data_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Data_L2_MLP zero division") desc = """ Average Parallel L2 cache miss data reads""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "Metric" maxval = 1 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'Summary']) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization (percentage)""" class Metric_CPUs_Utilized: name = "CPUs_Utilized" domain = "Metric" maxval = 300 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = CPUs_Utilized(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPUs_Utilized zero division") desc = """ Average number of utilized CPUs""" class Metric_Core_Frequency: name = "Core_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary', 'Power']) sibling = None def compute(self, EV): try: self.val = Core_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Core_Frequency zero division") desc = """ Measured Average Core Frequency for unhalted processors [GHz]""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, 
"Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_SMT_2T_Utilization: name = "SMT_2T_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = SMT_2T_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SMT_2T_Utilization zero division") desc = """ Fraction of cycles where both hardware Logical Processors were active""" class Metric_Kernel_Utilization: name = "Kernel_Utilization" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in the Operating System (OS) Kernel mode""" class Metric_Kernel_CPI: name = "Kernel_CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Kernel_CPI zero division") desc = """ Cycles Per Instruction for the Operating System (OS) Kernel mode""" class Metric_DRAM_BW_Use: name = "DRAM_BW_Use" domain = "GB/sec" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = DRAM_BW_Use(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DRAM_BW_Use zero division") desc = """ Average external Memory Bandwidth Use for reads and writes [GB / sec]""" class Metric_Power: name = "Power" domain = "SystemMetric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power', 'SoC']) sibling = None def 
compute(self, EV): try: self.val = Power(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Power zero division") desc = """ Total package Power in Watts""" class Metric_Time: name = "Time" domain = "Seconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = Time(self, EV, 0) self.thresh = (self.val < 1) except ZeroDivisionError: handle_error_metric(self, "Time zero division") desc = """ Run duration time in seconds""" class Metric_Socket_CLKS: name = "Socket_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Socket_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Socket_CLKS zero division") desc = """ Socket actual clocks when any core is active on that socket""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Branches', 'OS']) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) self.thresh = (self.val < 1000000) except ZeroDivisionError: handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]""" # Schedule class Setup: def __init__(self, r): o = dict() n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n n = LCP() ; r.run(n) ; o["LCP"] = n n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n n = Fetch_Bandwidth() ; 
r.run(n) ; o["Fetch_Bandwidth"] = n n = MITE() ; r.run(n) ; o["MITE"] = n n = DSB() ; r.run(n) ; o["DSB"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n n = FB_Full() ; r.run(n) ; o["FB_Full"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Divider() ; r.run(n) ; o["Divider"] = n n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n n = Port_0() ; r.run(n) ; 
o["Port_0"] = n n = Port_1() ; r.run(n) ; o["Port_1"] = n n = Port_5() ; r.run(n) ; o["Port_5"] = n n = Port_6() ; r.run(n) ; o["Port_6"] = n n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n n = Port_2() ; r.run(n) ; o["Port_2"] = n n = Port_3() ; r.run(n) ; o["Port_3"] = n n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n n = Port_4() ; r.run(n) ; o["Port_4"] = n n = Port_7() ; r.run(n) ; o["Port_7"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n n = Assists() ; r.run(n) ; o["Assists"] = n n = CISC() ; r.run(n) ; o["CISC"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Resteers"].parent = o["Fetch_Latency"] o["MS_Switches"].parent = o["Fetch_Latency"] o["LCP"].parent = o["Fetch_Latency"] o["DSB_Switches"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["MITE"].parent = o["Fetch_Bandwidth"] o["DSB"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Memory_Bound"].parent = o["Backend_Bound"] o["L1_Bound"].parent = o["Memory_Bound"] o["DTLB_Load"].parent = o["L1_Bound"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["Lock_Latency"].parent = o["L1_Bound"] o["Split_Loads"].parent = o["L1_Bound"] o["G4K_Aliasing"].parent = o["L1_Bound"] o["FB_Full"].parent = o["L1_Bound"] o["L2_Bound"].parent = o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["Contested_Accesses"].parent = o["L3_Bound"] o["Data_Sharing"].parent = o["L3_Bound"] o["L3_Hit_Latency"].parent = o["L3_Bound"] o["SQ_Full"].parent = o["L3_Bound"] o["DRAM_Bound"].parent = o["Memory_Bound"] o["MEM_Bandwidth"].parent = o["DRAM_Bound"] 
o["MEM_Latency"].parent = o["DRAM_Bound"] o["Store_Bound"].parent = o["Memory_Bound"] o["Store_Latency"].parent = o["Store_Bound"] o["False_Sharing"].parent = o["Store_Bound"] o["Split_Stores"].parent = o["Store_Bound"] o["DTLB_Store"].parent = o["Store_Bound"] o["Core_Bound"].parent = o["Backend_Bound"] o["Divider"].parent = o["Core_Bound"] o["Ports_Utilization"].parent = o["Core_Bound"] o["Ports_Utilized_0"].parent = o["Ports_Utilization"] o["Ports_Utilized_1"].parent = o["Ports_Utilization"] o["Ports_Utilized_2"].parent = o["Ports_Utilization"] o["Ports_Utilized_3m"].parent = o["Ports_Utilization"] o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_0"].parent = o["ALU_Op_Utilization"] o["Port_1"].parent = o["ALU_Op_Utilization"] o["Port_5"].parent = o["ALU_Op_Utilization"] o["Port_6"].parent = o["ALU_Op_Utilization"] o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_2"].parent = o["Load_Op_Utilization"] o["Port_3"].parent = o["Load_Op_Utilization"] o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_4"].parent = o["Store_Op_Utilization"] o["Port_7"].parent = o["Store_Op_Utilization"] o["Light_Operations"].parent = o["Retiring"] o["Heavy_Operations"].parent = o["Retiring"] o["Microcode_Sequencer"].parent = o["Heavy_Operations"] o["Assists"].parent = o["Microcode_Sequencer"] o["CISC"].parent = o["Microcode_Sequencer"] # user visible metrics n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpBranch() ; r.metric(n) ; 
o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_SMT_2T_Utilization() ; r.metric(n) ; 
o["SMT_2T_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n n = Metric_Power() ; r.metric(n) ; o["Power"] = n n = Metric_Time() ; r.metric(n) ; o["Time"] = n n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n # references between groups o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Backend_Bound"].Retiring = o["Retiring"] o["Backend_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Retiring = o["Retiring"] o["Memory_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Backend_Bound = o["Backend_Bound"] o["Memory_Bound"].Fetch_Latency = o["Fetch_Latency"] o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Core_Bound"].Retiring = o["Retiring"] o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Core_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Core_Bound"].Fetch_Latency = o["Fetch_Latency"] o["Ports_Utilization"].Fetch_Latency = o["Fetch_Latency"] o["Ports_Utilized_0"].Fetch_Latency = o["Fetch_Latency"] o["Retiring"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Retiring = o["Retiring"] o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Heavy_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Microcode_Sequencer = 
o["Microcode_Sequencer"] o["CISC"].Assists = o["Assists"] # siblings cross-tree o["MS_Switches"].sibling = (o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],) o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],) o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],) o["Machine_Clears"].sibling = (o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"], o["Microcode_Sequencer"],) o["L1_Bound"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],) o["DTLB_Load"].sibling = (o["DTLB_Store"],) o["Lock_Latency"].sibling = (o["Store_Latency"],) o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"],) o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["False_Sharing"],) o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["False_Sharing"],) o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],) o["L3_Hit_Latency"].overlap = True o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],) o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],) o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],) o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],) o["Store_Latency"].overlap = True o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"],) o["Split_Stores"].sibling = (o["Port_4"],) o["DTLB_Store"].sibling = (o["DTLB_Load"],) o["Ports_Utilized_1"].sibling = (o["L1_Bound"],) o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"],) o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["Port_6"],) o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["Port_6"],) o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"],) o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], 
o["Port_5"],) o["Port_4"].sibling = (o["Split_Stores"],) o["Microcode_Sequencer"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],) o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
117,322
Python
.py
2,939
34.275264
423
0.657544
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,901
skl_client_ratios.py
andikleen_pmu-tools/skl_client_ratios.py
# -*- coding: latin-1 -*- # # auto generated TopDown/TMA 4.8-full-perf description for Intel 6th/7th gen Core (code named Skykale/Kabylake/Coffeelake) # Please see http://ark.intel.com for more details on these CPUs. # # References: # http://bit.ly/tma-ispass14 # http://halobates.de/blog/p/262 # https://sites.google.com/site/analysismethods/yasin-pubs # https://download.01.org/perfmon/ # https://github.com/andikleen/pmu-tools/wiki/toplev-manual # # Helpers print_error = lambda msg: False smt_enabled = False ebs_mode = False version = "4.8-full-perf" base_frequency = -1.0 Memory = 0 Average_Frequency = 0.0 num_cores = 1 num_threads = 1 num_sockets = 1 def handle_error(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 obj.thresh = False def handle_error_metric(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 # Constants Exe_Ports = 8 Mem_L2_Store_Cost = 9 Mem_STLB_Hit_Cost = 9 BAClear_Cost = 9 MS_Switches_Cost = 2 Avg_Assist_Cost = 34 Pipeline_Width = 4 OneMillion = 1000000 OneBillion = 1000000000 Energy_Unit = 61 Errata_Whitelist = "SKL091" EBS_Mode = 0 DS = 0 # Aux. 
# Aux. formulas

# Cycles charged to the backend: total stalls plus the few-uops-executed
# correction plus store-bound cycles.
def Backend_Bound_Cycles(self, EV, level):
    return EV("CYCLE_ACTIVITY.STALLS_TOTAL", level) + Few_Uops_Executed_Threshold(self, EV, level) + EV("EXE_ACTIVITY.BOUND_ON_STORES", level)

# Taken branches that are neither conditional-taken nor near calls
# (direct-or-indirect jumps, by elimination).
def Br_DoI_Jumps(self, EV, level):
    return EV("BR_INST_RETIRED.NEAR_TAKEN", level) - (EV("BR_INST_RETIRED.COND", level) - EV("BR_INST_RETIRED.NOT_TAKEN", level)) - 2 * EV("BR_INST_RETIRED.NEAR_CALL", level)

# Fraction of issue slots retired as branch/NOP "control-flow" work.
def Branching_Retired(self, EV, level):
    return (EV("BR_INST_RETIRED.ALL_BRANCHES", level) + 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("INST_RETIRED.NOP", level)) / SLOTS(self, EV, level)

# Core-Bound share attributable to serializing operations and empty-RS cycles.
def Serialize_Core(self, EV, level):
    return self.Core_Bound.compute(EV) * (self.Serializing_Operation.compute(EV) + self.Core_Bound.compute(EV) * EV("RS_EVENTS.EMPTY_CYCLES", level) / CLKS(self, EV, level) * self.Ports_Utilized_0.compute(EV)) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))

# Weight of microcode-driven mispredict-like events relative to branch mispredicts.
def Umisp(self, EV, level):
    return 10 * self.Microcode_Sequencer.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV)

# Fraction of Microcode Sequencer slots attributable to assists.
def Assist(self, EV, level):
    return (self.Microcode_Sequencer.compute(EV) / (self.Microcode_Sequencer.compute(EV) + self.Few_Uops_Instructions.compute(EV))) * (self.Assists.compute(EV) / self.Microcode_Sequencer.compute(EV))

# Frontend cost of assists, distributed over the Fetch_Latency sub-nodes.
def Assist_Frontend(self, EV, level):
    return Assist(self, EV, level) * self.Fetch_Latency.compute(EV) * (self.MS_Switches.compute(EV) + self.Branch_Resteers.compute(EV) * (self.Clears_Resteers.compute(EV) + self.Mispredicts_Resteers.compute(EV) * Umisp(self, EV, level)) / (self.Clears_Resteers.compute(EV) + self.Unknown_Branches.compute(EV) + self.Mispredicts_Resteers.compute(EV))) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))

# Retirement cost of assists (share of Heavy_Operations).
def Assist_Retired(self, EV, level):
    return Assist(self, EV, level) * self.Heavy_Operations.compute(EV)

# Cycles spent Core-Bound (no-port-utilized cycles plus the few-uops correction).
def Core_Bound_Cycles(self, EV, level):
    return self.Ports_Utilized_0.compute(EV) * CLKS(self, EV, level) + Few_Uops_Executed_Threshold(self, EV, level)

# Cycles with exactly 1 port utilized; SMT-on uses core-level counters halved.
def Cycles_1_Port_Utilized(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) - EV("UOPS_EXECUTED.CORE_CYCLES_GE_2", level)) / 2 if smt_enabled else EV("EXE_ACTIVITY.1_PORTS_UTIL", level)

# Cycles with exactly 2 ports utilized.
def Cycles_2_Ports_Utilized(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_2", level) - EV("UOPS_EXECUTED.CORE_CYCLES_GE_3", level)) / 2 if smt_enabled else EV("EXE_ACTIVITY.2_PORTS_UTIL", level)

# Cycles with 3 or more ports utilized.
def Cycles_3m_Ports_Utilized(self, EV, level):
    return EV("UOPS_EXECUTED.CORE_CYCLES_GE_3", level) / 2 if smt_enabled else EV("UOPS_EXECUTED.CORE_CYCLES_GE_3", level)

# Measurement interval in seconds (interval-ms is supplied by the driver).
def DurationTimeInSeconds(self, EV, level):
    return EV("interval-ms", 0) / 1000

# Cycles in which at least one uop executed on this thread/core.
def Execute_Cycles(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.THREAD:c1", level)

# factor used for metrics associating fixed costs for FB Hits - according to probability theory if all FB Hits come at a random rate in original L1_Miss cost interval then the average cost for each one is 0.5 of the fixed cost
def FB_Factor(self, EV, level):
    return 1 + FBHit_per_L1Miss(self, EV, level) / 2

# Fill-buffer hits per L1 data-cache miss.
def FBHit_per_L1Miss(self, EV, level):
    return EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("MEM_LOAD_RETIRED.L1_MISS", level)

# Total uops delivered by all four frontend paths (DSB, LSD, MITE, MS).
def Fetched_Uops(self, EV, level):
    return EV("IDQ.DSB_UOPS", level) + EV("LSD.UOPS", level) + EV("IDQ.MITE_UOPS", level) + EV("IDQ.MS_UOPS", level)

# Correction term: cycles with low port utilization weighted by retiring share.
def Few_Uops_Executed_Threshold(self, EV, level):
    return EV("EXE_ACTIVITY.1_PORTS_UTIL", level) + self.Retiring.compute(EV) * EV("EXE_ACTIVITY.2_PORTS_UTIL", level)

# Floating Point computational (arithmetic) Operations Count
def FLOP_Count(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.SCALAR", level) + 2 * EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + 4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) + 8 * EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)

# Floating Point computational (arithmetic) Operations Count
def FP_Arith_Scalar(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.SCALAR", level)

# Floating Point computational (arithmetic) Operations Count
def FP_Arith_Vector(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.VECTOR", level)

# IPC normalized by machine width; used as a "high IPC" qualifier by thresholds.
def HighIPC(self, EV, level):
    val = IPC(self, EV, level) / Pipeline_Width
    return val

# Fraction of cycles stalled on L1D misses that hit in L2.
def L2_Bound_Ratio(self, EV, level):
    return (EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", level) - EV("CYCLE_ACTIVITY.STALLS_L2_MISS", level)) / CLKS(self, EV, level)

# Sum of the "light operations" sub-nodes.
def Light_Ops_Sum(self, EV, level):
    return self.FP_Arith.compute(EV) + self.Memory_Operations.compute(EV) + self.Fused_Instructions.compute(EV) + self.Non_Fused_Branches.compute(EV)

# L2-hit loads, scaled up for fill-buffer hits sharing the same miss.
def LOAD_L2_HIT(self, EV, level):
    return EV("MEM_LOAD_RETIRED.L2_HIT", level) * (1 + FBHit_per_L1Miss(self, EV, level))

# L3-hit loads weighted by the fill-buffer factor.
def LOAD_L3_HIT(self, EV, level):
    return EV("MEM_LOAD_RETIRED.L3_HIT", level) * FB_Factor(self, EV, level)

# Cross-core snoop outcomes for L3-hit loads.
def LOAD_XSNP_HIT(self, EV, level):
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT", level)

def LOAD_XSNP_HITM(self, EV, level):
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM", level)

def LOAD_XSNP_MISS(self, EV, level):
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", level)

# DRAM-bound cycle fraction (L3-miss stalls plus L2-bound residue).
def MEM_Bound_Ratio(self, EV, level):
    return EV("CYCLE_ACTIVITY.STALLS_L3_MISS", level) / CLKS(self, EV, level) + L2_Bound_Ratio(self, EV, level) - self.L2_Bound.compute(EV)

# Fraction of stores that are locked loads' companions.
def Mem_Lock_St_Fraction(self, EV, level):
    return EV("MEM_INST_RETIRED.LOCK_LOADS", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)

# Share of backend-bound cycles attributable to memory.
def Memory_Bound_Fraction(self, EV, level):
    return (EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", level) + EV("EXE_ACTIVITY.BOUND_ON_STORES", level)) / Backend_Bound_Cycles(self, EV, level)

# Fraction of pipeline clears caused by branch mispredicts (vs machine clears).
def Mispred_Clears_Fraction(self, EV, level):
    return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))

def OCR_all_rfo_l3_hit_snoop_hitm(self, EV, level):
    return EV("OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM", level)

# Offcore-request-outstanding cycles, clamped to elapsed cycles via min().
def ORO_Demand_RFO_C1(self, EV, level):
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)), level)

def ORO_DRD_Any_Cycles(self, EV, level):
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)), level)

def ORO_DRD_BW_Cycles(self, EV, level):
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c4", level)), level)

# Super-queue-full cycles; core-level event halved when SMT is on.
def SQ_Full_Cycles(self, EV, level):
    return (EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) / 2) if smt_enabled else EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level)

# Cycles charged to store RFOs hitting L2, excluding locked accesses.
def Store_L2_Hit_Cycles(self, EV, level):
    return EV("L2_RQSTS.RFO_HIT", level) * Mem_L2_Store_Cost * (1 - Mem_Lock_St_Fraction(self, EV, level))

# Frequency-scaled latency costs; Core_Frequency is presumably defined later in
# this generated file - TODO confirm against the full source.
def Mem_XSNP_HitM_Cost(self, EV, level):
    return 22 * Core_Frequency(self, EV, level)

def Mem_XSNP_Hit_Cost(self, EV, level):
    return 20 * Core_Frequency(self, EV, level)

def Mem_XSNP_None_Cost(self, EV, level):
    return 10 * Core_Frequency(self, EV, level)

def Mem_L2_Hit_Cost(self, EV, level):
    return 3.5 * Core_Frequency(self, EV, level)

# Recovery cycles after clears; core-level counter halved when SMT is on.
def Recovery_Cycles(self, EV, level):
    return (EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2) if smt_enabled else EV("INT_MISC.RECOVERY_CYCLES", level)

# Retired over issued uops; < 1 indicates speculative waste.
def Retire_Fraction(self, EV, level):
    return Retired_Slots(self, EV, level) / EV("UOPS_ISSUED.ANY", level)

# Retired slots per Logical Processor
def Retired_Slots(self, EV, level):
    return EV("UOPS_RETIRED.RETIRE_SLOTS", level)

# Number of logical processors (enabled or online) on the target system
def Num_CPUs(self, EV, level):
    return 8 if smt_enabled else 4

# A system parameter for dependent-loads (pointer chasing like access pattern) of the workload.
# An integer fraction in range from 0 (no dependent loads) to 100 (all loads are dependent loads)
def Dependent_Loads_Weight(self, EV, level):
    return 20

# Total pipeline cost of Branch Misprediction related bottlenecks
def Mispredictions(self, EV, level):
    val = 100 * (1 - Umisp(self, EV, level)) * (self.Branch_Mispredicts.compute(EV) + self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)
def Big_Code(self, EV, level):
    val = 100 * self.Fetch_Latency.compute(EV) * (self.ITLB_Misses.compute(EV) + self.ICache_Misses.compute(EV) + self.Unknown_Branches.compute(EV)) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)
def Instruction_Fetch_BW(self, EV, level):
    val = 100 * (self.Frontend_Bound.compute(EV) - (1 - Umisp(self, EV, level)) * self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) - Assist_Frontend(self, EV, level)) - Big_Code(self, EV, level)
    self.thresh = (val > 20)
    return val

# Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks
# Weighs DRAM bandwidth, SQ_Full and FB_Full by their parents' shares.
def Cache_Memory_Bandwidth(self, EV, level):
    val = 100 * ((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Bandwidth.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.SQ_Full.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.FB_Full.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of external Memory- or Cache-Latency related bottlenecks
def Cache_Memory_Latency(self, EV, level):
    val = 100 * ((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Latency.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.L3_Hit_Latency.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * self.L2_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Store_Latency.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.L1_Hit_Latency.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)
def Memory_Data_TLBs(self, EV, level):
    val = 100 * (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / max(self.Memory_Bound.compute(EV) , (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV)))) * (self.DTLB_Load.compute(EV) / max(self.L1_Bound.compute(EV) , (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.DTLB_Store.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)
# NOTE(review): the trailing term Other_Nukes/(Other_Nukes) reduces the last
# factor to 0 - this looks like a generator artifact (no Remote node on this
# model); left as generated.
def Memory_Synchronization(self, EV, level):
    val = 100 * (self.Memory_Bound.compute(EV) * ((self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Contested_Accesses.compute(EV) + self.Data_Sharing.compute(EV)) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)) + (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * self.False_Sharing.compute(EV) / ((self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)) - self.Store_Latency.compute(EV))) + self.Machine_Clears.compute(EV) * (1 - self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))))
    self.thresh = (val > 10)
    return val

# Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy.
def Compute_Bound_Est(self, EV, level):
    # Divider share plus the heavily-utilized-ports share of Ports_Utilization.
    val = 100 * ((self.Core_Bound.compute(EV) * self.Divider.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) + (self.Core_Bound.compute(EV) * (self.Ports_Utilization.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) * (self.Ports_Utilized_3m.compute(EV) / (self.Ports_Utilized_0.compute(EV) + self.Ports_Utilized_1.compute(EV) + self.Ports_Utilized_2.compute(EV) + self.Ports_Utilized_3m.compute(EV)))))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments)
# NOTE(review): Other_Nukes / (Other_Nukes) evaluates to 1 - generator artifact,
# left as generated.
def Irregular_Overhead(self, EV, level):
    val = 100 * (Assist_Frontend(self, EV, level) + Umisp(self, EV, level) * self.Branch_Mispredicts.compute(EV) + (self.Machine_Clears.compute(EV) * self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))) + Serialize_Core(self, EV, level) + Assist_Retired(self, EV, level))
    self.thresh = (val > 10)
    return val

# Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.
def Other_Bottlenecks(self, EV, level):
    # Whatever is not explained by the named Bottleneck metrics above.
    val = 100 - (Big_Code(self, EV, level) + Instruction_Fetch_BW(self, EV, level) + Mispredictions(self, EV, level) + Cache_Memory_Bandwidth(self, EV, level) + Cache_Memory_Latency(self, EV, level) + Memory_Data_TLBs(self, EV, level) + Memory_Synchronization(self, EV, level) + Compute_Bound_Est(self, EV, level) + Irregular_Overhead(self, EV, level) + Branching_Overhead(self, EV, level) + Useful_Work(self, EV, level))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound). Consider Loop Unrolling or function inlining optimizations
def Branching_Overhead(self, EV, level):
    val = 100 * Branching_Retired(self, EV, level)
    self.thresh = (val > 5)
    return val

# Total pipeline cost of "useful operations" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.
def Useful_Work(self, EV, level):
    val = 100 * (self.Retiring.compute(EV) - Branching_Retired(self, EV, level) - Assist_Retired(self, EV, level))
    self.thresh = (val > 20)
    return val

# Probability of Core Bound bottleneck hidden by SMT-profiling artifacts. Tip: consider analysis with SMT disabled
def Core_Bound_Likely(self, EV, level):
    # Only meaningful when the sibling thread is busy > 50% of the time.
    val = 100 * (1 - self.Core_Bound.compute(EV) / self.Ports_Utilization.compute(EV) if self.Core_Bound.compute(EV) < self.Ports_Utilization.compute(EV) else 1) if SMT_2T_Utilization(self, EV, level) > 0.5 else 0
    self.thresh = (val > 0.5)
    return val

# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)

# Uops Per Instruction
def UopPI(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
    self.thresh = (val > 1.05)
    return val

# Uops per taken branch
def UpTB(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 1.5
    return val

# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
    return 1 / IPC(self, EV, level)

# Per-Logical Processor actual clocks when the Logical Processor is active.
def CLKS(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD", level)

# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
    return Pipeline_Width * CORE_CLKS(self, EV, level)

# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level): return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_ISSUED.ANY", level) # Instructions Per Cycle across hyper-threads (per physical core) def CoreIPC(self, EV, level): return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level) # Floating Point Operations Per Cycle def FLOPc(self, EV, level): return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level) # Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to Fused-Multiply Add FMA counting - common. def FP_Arith_Utilization(self, EV, level): return (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level)) / (2 * CORE_CLKS(self, EV, level)) # Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor) def ILP(self, EV, level): return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_EXECUTED.THREAD:c1", level) # uops Executed per Cycle def EPC(self, EV, level): return EV("UOPS_EXECUTED.THREAD", level) / CLKS(self, EV, level) # Core actual clocks when any Logical Processor is active on the Physical Core def CORE_CLKS(self, EV, level): return ((EV("CPU_CLK_UNHALTED.THREAD", level) / 2) * (1 + EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_XCLK", level))) if ebs_mode else(EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2) if smt_enabled else CLKS(self, EV, level) # Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills def IpLoad(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_LOADS", level) self.thresh = (val < 3) return val # Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses. 
#Link Opt Guide section: Minimize Register Spills def IpStore(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_STORES", level) self.thresh = (val < 8) return val # Instructions per Branch (lower number means higher occurrence rate) def IpBranch(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level) self.thresh = (val < 8) return val # Instructions per (near) call (lower number means higher occurrence rate) def IpCall(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level) self.thresh = (val < 200) return val # Instructions per taken branch def IpTB(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level) self.thresh = val < Pipeline_Width * 2 + 1 return val # Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes. def BpTkBranch(self, EV, level): return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level) # Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408 def IpFLOP(self, EV, level): val = EV("INST_RETIRED.ANY", level) / FLOP_Count(self, EV, level) self.thresh = (val < 10) return val # Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW. def IpArith(self, EV, level): val = EV("INST_RETIRED.ANY", level) / (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level)) self.thresh = (val < 10) return val # Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. 
def IpArith_Scalar_SP(self, EV, level):
    # Instructions per FP scalar single-precision arithmetic instruction.
    # Values < 1 are possible due to intentional FMA double counting.
    insts = EV("INST_RETIRED.ANY", level)
    scalar_sp = EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level)
    ratio = insts / scalar_sp
    self.thresh = ratio < 10
    return ratio

# Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_DP(self, EV, level):
    insts = EV("INST_RETIRED.ANY", level)
    scalar_dp = EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level)
    ratio = insts / scalar_dp
    self.thresh = ratio < 10
    return ratio

# Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX128(self, EV, level):
    insts = EV("INST_RETIRED.ANY", level)
    packed128 = EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level)
    ratio = insts / packed128
    self.thresh = ratio < 10
    return ratio

# Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX256(self, EV, level):
    insts = EV("INST_RETIRED.ANY", level)
    packed256 = EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)
    ratio = insts / packed256
    self.thresh = ratio < 10
    return ratio

# Instructions per PAUSE (lower number means higher occurrence rate)
def IpPause(self, EV, level):
    insts = Instructions(self, EV, level)
    return insts / EV("ROB_MISC_EVENTS.PAUSE_INST", level)

# Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)
def IpSWPF(self, EV, level):
    insts = EV("INST_RETIRED.ANY", level)
    prefetches = EV("SW_PREFETCH_ACCESS.T0:u0xF", level)
    ratio = insts / prefetches
    self.thresh = ratio < 100
    return ratio

# Total number of retired Instructions
def Instructions(self, EV, level):
    return EV("INST_RETIRED.ANY", level)

# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
    # Average uops retired per cycle among cycles where retirement happened.
    return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level)

# Instructions per a microcode Assist invocation. See Assists tree node for
# details (lower number means higher occurrence rate)
def IpAssist(self, EV, level):
    instructions = EV("INST_RETIRED.ANY", level)
    assists = EV("FP_ASSIST.ANY", level) + EV("OTHER_ASSISTS.ANY", level)
    per_assist = instructions / assists
    self.thresh = per_assist < 100000
    return per_assist

def Execute(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level)

# Average number of uops fetched from LSD per cycle
def Fetch_LSD(self, EV, level):
    return EV("LSD.UOPS", level) / EV("LSD.CYCLES_ACTIVE", level)

# Average number of uops fetched from DSB per cycle
def Fetch_DSB(self, EV, level):
    return EV("IDQ.DSB_UOPS", level) / EV("IDQ.DSB_CYCLES_ANY", level)

# Average number of uops fetched from MITE per cycle
def Fetch_MITE(self, EV, level):
    return EV("IDQ.MITE_UOPS", level) / EV("IDQ.MITE_CYCLES", level)

# Average number of Uops issued by front-end when it issued something
def Fetch_UpC(self, EV, level):
    return EV("UOPS_ISSUED.ANY", level) / EV("UOPS_ISSUED.ANY:c1", level)

# Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)
def LSD_Coverage(self, EV, level):
    return EV("LSD.UOPS", level) / Fetched_Uops(self, EV, level)

# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache).
# See section 'Decoded ICache' in Optimization Manual:
# http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    coverage = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    self.thresh = (coverage < 0.7) and HighIPC(self, EV, 1)
    return coverage

# Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.
def DSB_Switch_Cost(self, EV, level):
    # Average penalty cycles per DSB-to-MITE switch event.
    return EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", level) / EV("DSB2MITE_SWITCHES.COUNT", level)

# Total pipeline cost of DSB (uop cache) misses - subset of the
# Instruction_Fetch_BW Bottleneck.
def DSB_Misses(self, EV, level):
    # Latency-side share: DSB switches as a fraction of all fetch-latency nodes.
    fetch_lat = self.Fetch_Latency.compute(EV)
    dsb_switches = self.DSB_Switches.compute(EV)
    lat_total = (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    # Bandwidth-side share: MITE as a fraction of all fetch-bandwidth nodes.
    fetch_bw = self.Fetch_Bandwidth.compute(EV)
    mite = self.MITE.compute(EV)
    bw_total = self.LSD.compute(EV) + self.MITE.compute(EV) + self.DSB.compute(EV)
    cost = 100 * (fetch_lat * dsb_switches / lat_total + fetch_bw * mite / bw_total)
    self.thresh = cost > 10
    return cost

# Total pipeline cost of DSB (uop cache) hits - subset of the
# Instruction_Fetch_BW Bottleneck.
def DSB_Bandwidth(self, EV, level):
    fe_bound = self.Frontend_Bound.compute(EV)
    bw_share = self.Fetch_Bandwidth.compute(EV) / (self.Fetch_Bandwidth.compute(EV) + self.Fetch_Latency.compute(EV))
    dsb_share = self.DSB.compute(EV) / (self.LSD.compute(EV) + self.MITE.compute(EV) + self.DSB.compute(EV))
    cost = 100 * (fe_bound * bw_share * dsb_share)
    self.thresh = cost > 10
    return cost

# Average Latency for L1 instruction cache misses
def ICache_Miss_Latency(self, EV, level):
    return EV("ICACHE_16B.IFDATA_STALL", level) / EV("ICACHE_16B.IFDATA_STALL:c1:e1", level) + 2

# Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.
def IC_Misses(self, EV, level):
    # Fetch-latency share attributed to i-cache misses, scaled to percent.
    fetch_lat = self.Fetch_Latency.compute(EV)
    icache = self.ICache_Misses.compute(EV)
    lat_total = (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    cost = 100 * (fetch_lat * icache / lat_total)
    self.thresh = cost > 5
    return cost

# Instructions per non-speculative DSB miss (lower number means higher
# occurrence rate)
def IpDSB_Miss_Ret(self, EV, level):
    per_miss = EV("INST_RETIRED.ANY", level) / EV("FRONTEND_RETIRED.ANY_DSB_MISS", level)
    self.thresh = per_miss < 50
    return per_miss

# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower
# number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
    return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)

# L2 cache true code cacheline misses per kilo instruction
def L2MPKI_Code(self, EV, level):
    return 1000 * EV("FRONTEND_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache speculative code cacheline misses per kilo instruction
def L2MPKI_Code_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.CODE_RD_MISS", level) / EV("INST_RETIRED.ANY", level)

# Number of Instructions per non-speculative Branch Misprediction (JEClear)
# (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    per_misp = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    self.thresh = per_misp < 200
    return per_misp

# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    # Retire_Fraction scales the speculative BR_MISP_EXEC count toward a retired estimate.
    val = Instructions(self, EV, level) / (Retire_Fraction(self, EV, level) * EV("BR_MISP_EXEC.INDIRECT", level))
    self.thresh = (val < 1000)
    return val

# Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)
def Branch_Misprediction_Cost(self, EV, level):
    return Mispredictions(self, EV, level) * SLOTS(self, EV, level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / 100

# Speculative to Retired ratio of all clears (covering Mispredicts and nukes)
def Spec_Clears_Ratio(self, EV, level):
    return EV("INT_MISC.CLEARS_COUNT", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))

# Fraction of branches that are non-taken conditionals
def Cond_NT(self, EV, level):
    return EV("BR_INST_RETIRED.NOT_TAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are taken conditionals
def Cond_TK(self, EV, level):
    return (EV("BR_INST_RETIRED.CONDITIONAL", level) - EV("BR_INST_RETIRED.NOT_TAKEN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are CALL or RET
def CallRet(self, EV, level):
    return (EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("BR_INST_RETIRED.NEAR_RETURN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are unconditional (direct or indirect) jumps
def Jump(self, EV, level):
    return Br_DoI_Jumps(self, EV, level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / (EV("MEM_LOAD_RETIRED.L1_MISS", level) + EV("MEM_LOAD_RETIRED.FB_HIT", level))

# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)

# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)

# L1 cache true misses per kilo instruction for all demand loads (including speculative)
def L1MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.ALL_DEMAND_DATA_RD", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache misses per kilo instruction for all request types (including speculative)
def L2MPKI_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache misses per kilo instruction for all demand loads (including speculative)
def L2MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level) / EV("INST_RETIRED.ANY", level)

# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("OFFCORE_REQUESTS.DEMAND_RFO", level) / EV("INST_RETIRED.ANY", level)

# L2 cache hits per kilo instruction for all request types (including speculative)
def L2HPKI_All(self, EV, level):
    return 1000 *(EV("L2_RQSTS.REFERENCES", level) - EV("L2_RQSTS.MISS", level)) / EV("INST_RETIRED.ANY", level)

# L2 cache hits per kilo instruction for all demand loads (including speculative)
def L2HPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level) / EV("INST_RETIRED.ANY", level)

# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)

# Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)
def FB_HPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("INST_RETIRED.ANY", level)

# Cache fill / access bandwidths: 64-byte lines per fill event, over wall-clock time.
def L1D_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)

def L2_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)

def L3_Cache_Fill_BW(self, EV, level):
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)

def L3_Cache_Access_BW(self, EV, level):
    return 64 * EV("OFFCORE_REQUESTS.ALL_REQUESTS", level) / OneBillion / Time(self, EV, level)

# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    val = (EV("ITLB_MISSES.WALK_PENDING", level) + EV("DTLB_LOAD_MISSES.WALK_PENDING", level) + EV("DTLB_STORE_MISSES.WALK_PENDING", level) + EV("EPT.WALK_PENDING", level)) / (2 * CORE_CLKS(self, EV, level))
    self.thresh = (val > 0.5)
    return val

# STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Code_STLB_MPKI(self, EV, level):
    return 1000 * EV("ITLB_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Load_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Store_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    return L1D_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    return L2_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    return L3_Cache_Fill_BW(self, EV, level)

# Average per-core data access bandwidth to the L3 cache [GB / sec]
def L3_Cache_Access_BW_2T(self, EV, level):
    return L3_Cache_Access_BW(self, EV, level)

# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)

# Un-cacheable retired load per kilo instruction
def UC_Load_PKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_MISC_RETIRED.UC", level) / EV("INST_RETIRED.ANY", level)

# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)

# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)

# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)

# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)

# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)

# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / (EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2) if smt_enabled else 0

# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val

# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)

# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    return 64 *(EV("UNC_ARB_TRK_REQUESTS.ALL", level) + EV("UNC_ARB_COH_TRK_REQUESTS.ALL", level)) / OneMillion / Time(self, EV, level) / 1000

# Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches.
def MEM_Read_Latency(self, EV, level):
    return OneBillion *(EV("UNC_ARB_TRK_OCCUPANCY.DATA_READ", level) / EV("UNC_ARB_TRK_REQUESTS.DATA_READ", level)) / (Socket_CLKS(self, EV, level) / Time(self, EV, level))

# Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches
def MEM_Parallel_Reads(self, EV, level):
    return EV("UNC_ARB_TRK_OCCUPANCY.DATA_READ", level) / EV("UNC_ARB_TRK_OCCUPANCY.DATA_READ:c1", level)

# Total package Power in Watts
def Power(self, EV, level):
    return EV("UNC_PKG_ENERGY_STATUS", level) * Energy_Unit /(Time(self, EV, level) * OneMillion )

# Run duration time in seconds
def Time(self, EV, level):
    val = EV("interval-s", 0)
    self.thresh = (val < 1)
    return val

# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    return EV("UNC_CLOCK.SOCKET", level)

# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = (val < 1000000)
    return val

# Event groups

class Frontend_Bound:
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_4:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    def compute(self, EV):
        try:
            # Undelivered-uop slots as a fraction of all issue slots.
            self.val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. 
Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.""" class Fetch_Latency: name = "Fetch_Latency" domain = "Slots" area = "FE" level = 2 htoff = False sample = ['FRONTEND_RETIRED.LATENCY_GE_16:pp', 'FRONTEND_RETIRED.LATENCY_GE_8:pp'] errcount = 0 sibling = None metricgroup = frozenset(['Frontend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = Pipeline_Width * EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Fetch_Latency zero division") return self.val desc = """ This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction- cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period.""" class ICache_Misses: name = "ICache_Misses" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.L2_MISS:pp', 'FRONTEND_RETIRED.L1I_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss']) maxval = None def compute(self, EV): try: self.val = (EV("ICACHE_16B.IFDATA_STALL", 3) + 2 * EV("ICACHE_16B.IFDATA_STALL:c1:e1", 3)) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "ICache_Misses zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.. 
Using compiler's Profile-Guided Optimization (PGO) can reduce i-cache misses through improved hot code layout.""" class ITLB_Misses: name = "ITLB_Misses" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.STLB_MISS:pp', 'FRONTEND_RETIRED.ITLB_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB']) maxval = None def compute(self, EV): try: self.val = EV("ICACHE_TAG.STALLS", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "ITLB_Misses zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.. Consider large 2M pages for code (selectively prefer hot large-size function, due to limited 2M entries). Linux options: standard binaries use libhugetlbfs; Hfsort.. https://github. com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public ations/optimizing-function-placement-for-large-scale-data- center-applications-2/""" class Branch_Resteers: name = "Branch_Resteers" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['BR_MISP_RETIRED.ALL_BRANCHES'] errcount = 0 sibling = None metricgroup = frozenset(['FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("INT_MISC.CLEAR_RESTEER_CYCLES", 3) / CLKS(self, EV, 3) + self.Unknown_Branches.compute(EV) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Branch_Resteers zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. 
Note the value of this node may overlap with its siblings.""" class Mispredicts_Resteers: name = "Mispredicts_Resteers" domain = "Clocks" area = "FE" level = 4 htoff = False sample = ['INT_MISC.CLEAR_RESTEER_CYCLES'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP']) maxval = None def compute(self, EV): try: self.val = Mispred_Clears_Fraction(self, EV, 4) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Mispredicts_Resteers zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage.""" class Clears_Resteers: name = "Clears_Resteers" domain = "Clocks" area = "FE" level = 4 htoff = False sample = ['INT_MISC.CLEAR_RESTEER_CYCLES'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'MachineClears']) maxval = None def compute(self, EV): try: self.val = (1 - Mispred_Clears_Fraction(self, EV, 4)) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Clears_Resteers zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears.""" class Unknown_Branches: name = "Unknown_Branches" domain = "Clocks" area = "FE" level = 4 htoff = False sample = ['BACLEARS.ANY'] errcount = 0 sibling = None metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat']) maxval = None def compute(self, EV): try: self.val = BAClear_Cost * EV("BACLEARS.ANY", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Unknown_Branches zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to new 
branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches""" class MS_Switches: name = "MS_Switches" domain = "Clocks_Estimated" area = "FE" level = 3 htoff = False sample = ['IDQ.MS_SWITCHES'] errcount = 0 sibling = None metricgroup = frozenset(['FetchLat', 'MicroSeq']) maxval = 1.0 def compute(self, EV): try: self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MS_Switches zero division") return self.val desc = """ This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals.""" class LCP: name = "LCP" domain = "Clocks" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("DECODE.LCP", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "LCP zero division") return self.val desc = """ This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). 
Using proper compiler flags or Intel Compiler by default will certainly avoid this.""" class DSB_Switches: name = "DSB_Switches" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.DSB_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['DSBmiss', 'FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DSB_Switches zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.. See section 'Optimization for Decoded Icache' in Optimization Manual:. http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html""" class Fetch_Bandwidth: name = "Fetch_Bandwidth" domain = "Slots" area = "FE" level = 2 htoff = False sample = ['FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_2:pp'] errcount = 0 sibling = None metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV) self.thresh = (self.val > 0.2) except ZeroDivisionError: handle_error(self, "Fetch_Bandwidth zero division") return self.val desc = """ This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. 
For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend.""" class MITE: name = "MITE" domain = "Slots_Estimated" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.ANY_DSB_MISS'] errcount = 0 sibling = None metricgroup = frozenset(['DSBmiss', 'FetchBW']) maxval = None def compute(self, EV): try: self.val = (EV("IDQ.ALL_MITE_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_MITE_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2 self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MITE zero division") return self.val desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.. Consider tuning codegen of 'small hotspots' that can fit in DSB. Read about 'Decoded ICache' in Optimization Manual:. 
http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html""" class Decoder0_Alone: name = "Decoder0_Alone" domain = "Slots_Estimated" area = "FE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['DSBmiss', 'FetchBW']) maxval = None def compute(self, EV): try: self.val = (EV("INST_DECODED.DECODERS:c1", 4) - EV("INST_DECODED.DECODERS:c2", 4)) / CORE_CLKS(self, EV, 4) / 2 self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Decoder0_Alone zero division") return self.val desc = """ This metric represents fraction of cycles where decoder-0 was the only active decoder""" class DSB: name = "DSB" domain = "Slots_Estimated" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['DSB', 'FetchBW']) maxval = None def compute(self, EV): try: self.val = (EV("IDQ.DSB_CYCLES_ANY", 3) - EV("IDQ.DSB_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2 self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DSB zero division") return self.val desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. 
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.""" class LSD: name = "LSD" domain = "Slots_Estimated" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['FetchBW', 'LSD']) maxval = None def compute(self, EV): try: self.val = (EV("LSD.CYCLES_ACTIVE", 3) - EV("LSD.CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2 self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "LSD zero division") return self.val desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.""" class Bad_Speculation: name = "Bad_Speculation" domain = "Slots" area = "BAD" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['TmaL1']) maxval = None def compute(self, EV): try: self.val = (EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Bad_Speculation zero division") return self.val desc = """ This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss- predicted branches are categorized under Bad Speculation category. 
Incorrect data speculation followed by Memory Ordering Nukes is another example."""
# NOTE(review): auto-generated TMA tree-node classes. Each class carries node
# metadata (name/domain/area/level/sample/metricgroup) and a compute(EV) that
# sets self.val (metric value) and self.thresh (highlight flag) from perf
# events fetched via EV(); ZeroDivisionError is routed to handle_error().
# Do not hand-edit formulas here - regenerate from the TMA spreadsheet instead.
# TMA node Branch_Mispredicts (level 2, BAD): slots wasted by branch misprediction.
class Branch_Mispredicts: name = "Branch_Mispredicts" domain = "Slots" area = "BAD" level = 2 htoff = False sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Branch_Mispredicts zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.. Using profile feedback in the compiler may help. Please see the Optimization Manual for general strategies for addressing branch misprediction issues.. 
http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html"""
# TMA node Other_Mispredicts (level 3, BAD): mispredict slots not accounted to
# retired x86 branches; floored at 0.0001 by the max().
class Other_Mispredicts: name = "Other_Mispredicts" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'BrMispredicts']) maxval = None def compute(self, EV): try: self.val = max(self.Branch_Mispredicts.compute(EV) * (1 - EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) / (EV("INT_MISC.CLEARS_COUNT", 3) - EV("MACHINE_CLEARS.COUNT", 3))) , 0.0001 ) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Other_Mispredicts zero division") return self.val desc = """ This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)."""
# TMA node Machine_Clears (level 2, BAD): slots wasted by machine clears,
# computed as the Bad_Speculation remainder after Branch_Mispredicts.
class Machine_Clears: name = "Machine_Clears" domain = "Slots" area = "BAD" level = 2 htoff = False sample = ['MACHINE_CLEARS.COUNT'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Machine_Clears zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.. See \"Memory Disambiguation\" in Optimization Manual and:. 
https://software.intel.com/sites/default/files/ m/d/4/1/d/8/sma.pdf"""
# TMA node Other_Nukes (level 3, BAD): machine clears not caused by memory
# ordering (non-MEMORY_ORDERING fraction of MACHINE_CLEARS.COUNT), floored at 0.0001.
class Other_Nukes: name = "Other_Nukes" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'Machine_Clears']) maxval = None def compute(self, EV): try: self.val = max(self.Machine_Clears.compute(EV) * (1 - EV("MACHINE_CLEARS.MEMORY_ORDERING", 3) / EV("MACHINE_CLEARS.COUNT", 3)) , 0.0001 ) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Other_Nukes zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering."""
# TMA node Backend_Bound (level 1, BE): top-level category; slots not delivered
# because the backend lacked resources (1 - Frontend - issued-or-recovered slots).
class Backend_Bound: name = "Backend_Bound" domain = "Slots" area = "BE" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvOB', 'TmaL1']) maxval = None def compute(self, EV): try: self.val = 1 - self.Frontend_Bound.compute(EV) - (EV("UOPS_ISSUED.ANY", 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.2) except ZeroDivisionError: handle_error(self, "Backend_Bound zero division") return self.val desc = """ This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. 
Backend Bound is further divided into two main categories: Memory Bound and Core Bound."""
# TMA node Memory_Bound (level 2, BE/Mem): fraction of Backend_Bound slots
# attributed to the memory subsystem (Memory_Bound_Fraction helper).
class Memory_Bound: name = "Memory_Bound" domain = "Slots" area = "BE/Mem" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Backend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Memory_Bound zero division") return self.val desc = """ This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)."""
# TMA node L1_Bound (level 3, BE/Mem, Stalls domain): memory stalls minus
# L1D-miss stalls, clamped to >= 0, per clock.
class L1_Bound: name = "L1_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L1_HIT:pp', 'MEM_LOAD_RETIRED.FB_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = max((EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3) , 0 ) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L1_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. 
These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache."""
# TMA node DTLB_Load (level 4, BE/Mem): estimated cycles lost to load DTLB
# misses (STLB hits weighted by Mem_STLB_Hit_Cost plus page-walk cycles),
# capped by the memory-minus-L1D-miss cycle window; maxval bounds it at 1.0.
class DTLB_Load: name = "DTLB_Load" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.STLB_MISS_LOADS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMT', 'MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = min(Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT:c1", 4) + EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 4) , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("CYCLE_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DTLB_Load zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). 
This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss.."""
# TMA node Load_STLB_Hit (level 5, BE/Mem): DTLB load misses that hit the STLB,
# derived as DTLB_Load minus Load_STLB_Miss.
class Load_STLB_Hit: name = "Load_STLB_Hit" domain = "Clocks_Estimated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = self.DTLB_Load.compute(EV) - self.Load_STLB_Miss.compute(EV) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Load_STLB_Hit zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)"""
# TMA node Load_STLB_Miss (level 5, BE/Mem): load page-walk active cycles / CLKS.
class Load_STLB_Miss: name = "Load_STLB_Miss" domain = "Clocks_Calculated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 5) / CLKS(self, EV, 5) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Load_STLB_Miss zero division") return self.val desc = """ This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk"""
# TMA node Store_Fwd_Blk (level 4, BE/Mem): blocked store-forwards, weighted by
# a fixed 13-cycle penalty per LD_BLOCKS.STORE_FORWARD event.
class Store_Fwd_Blk: name = "Store_Fwd_Blk" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = 1.0 def compute(self, EV): try: self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Fwd_Blk zero division") return self.val desc = """ This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. 
To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading."""
# TMA node L1_Hit_Latency (level 4, BE/Mem): estimated cycles on loads that hit
# L1 (retired loads minus FB hits and L1 misses, weighted by Dependent_Loads_Weight),
# capped by the memory-minus-L1D-miss cycle window.
class L1_Hit_Latency: name = "L1_Hit_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_RETIRED.L1_HIT'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat']) maxval = 1.0 def compute(self, EV): try: self.val = min(2 *(EV("MEM_INST_RETIRED.ALL_LOADS", 4) - EV("MEM_LOAD_RETIRED.FB_HIT", 4) - EV("MEM_LOAD_RETIRED.L1_MISS", 4)) * Dependent_Loads_Weight(self, EV, 4) / 100 , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("CYCLE_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L1_Hit_Latency zero division") return self.val desc = """ This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. 
The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example."""
# TMA node Lock_Latency (level 4, BE/Mem): cycles handling cache misses from
# lock operations (always classified L1_Bound regardless of data source).
class Lock_Latency: name = "Lock_Latency" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.LOCK_LOADS'] errcount = 0 sibling = None metricgroup = frozenset(['Offcore']) maxval = 1.0 def compute(self, EV): try: self.val = (12 * max(0 , EV("MEM_INST_RETIRED.LOCK_LOADS", 4) - EV("L2_RQSTS.ALL_RFO", 4)) + Mem_Lock_St_Fraction(self, EV, 4) * (Mem_L2_Store_Cost * EV("L2_RQSTS.RFO_HIT", 4) + ORO_Demand_RFO_C1(self, EV, 4))) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Lock_Latency zero division") return self.val desc = """ This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them."""
# TMA node Split_Loads (level 4, BE/Mem): loads crossing a 64-byte line
# (LD_BLOCKS.NO_SR), weighted by Load_Miss_Real_Latency.
class Split_Loads: name = "Split_Loads" domain = "Clocks_Calculated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.SPLIT_LOADS:pp'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = 1.0 def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Split_Loads zero division") return self.val desc = """ This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. . Consider aligning data or hot structure fields. 
See the Optimization Manual for more details"""
# TMA node 4K_Aliasing (level 4, BE/Mem). Class is named G4K_Aliasing because a
# Python identifier cannot start with a digit; the reported name stays "4K_Aliasing".
class G4K_Aliasing: name = "4K_Aliasing" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "G4K_Aliasing zero division") return self.val desc = """ This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).. Consider reducing independent loads/stores accesses with 4K offsets. See the Optimization Manual for more details"""
# TMA node FB_Full (level 4, BE/Mem): L1D fill-buffer unavailability; values > 1
# are valid per the description, hence maxval = None. thresh ignores the parent.
class FB_Full: name = "FB_Full" domain = "Clocks_Calculated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW']) maxval = None def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("L1D_PEND_MISS.FB_FULL:c1", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.3) except ZeroDivisionError: handle_error(self, "FB_Full zero division") return self.val desc = """ This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory).. See $issueBW and $issueSL hints. 
Avoid software prefetches if indeed memory BW limited."""
# TMA node L2_Bound (level 3, BE/Mem): L2_Bound_Ratio scaled by the L2-hit share
# of (L2 hits + fill-buffer-full cycles).
class L2_Bound: name = "L2_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L2_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = (LOAD_L2_HIT(self, EV, 3) / (LOAD_L2_HIT(self, EV, 3) + EV("L1D_PEND_MISS.FB_FULL:c1", 3))) * L2_Bound_Ratio(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L2_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance."""
# TMA node L3_Bound (level 3, BE/Mem): L2-miss stalls minus L3-miss stalls, per clock.
class L3_Bound: name = "L3_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L3_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = (EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3) - EV("CYCLE_ACTIVITY.STALLS_L3_MISS", 3)) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L3_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. 
L2 misses/L3 hits) can improve the latency and increase performance."""
# TMA node Contested_Accesses (level 4, BE/Mem): cross-core snoop HITM/MISS
# loads weighted by (snoop cost - L2-hit cost) and the FB_Factor correction.
class Contested_Accesses: name = "Contested_Accesses" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = ((Mem_XSNP_HitM_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HITM(self, EV, 4) + (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_MISS(self, EV, 4)) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Contested_Accesses zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing."""
# TMA node Data_Sharing (level 4, BE/Mem): clean cross-core snoop hits
# (XSNP_HIT) weighted like Contested_Accesses but without the HITM term.
class Data_Sharing: name = "Data_Sharing" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HIT(self, EV, 4) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Data_Sharing zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. 
Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance."""
# TMA node L3_Hit_Latency (level 4, BE/Mem): unloaded L3-hit latency cost
# (no-snoop L3 hits weighted by XSNP_None cost minus L2-hit cost).
class L3_Hit_Latency: name = "L3_Hit_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_RETIRED.L3_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_XSNP_None_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L3_Hit_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. 
Note the value of this node may overlap with its siblings."""
# TMA node SQ_Full (level 4, BE/Mem): Super Queue full cycles; divided by
# CORE_CLKS since the SQ is shared between SMT siblings.
class SQ_Full: name = "SQ_Full" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore']) maxval = None def compute(self, EV): try: self.val = SQ_Full_Cycles(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.3) and self.parent.thresh except ZeroDivisionError: handle_error(self, "SQ_Full zero division") return self.val desc = """ This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)."""
# TMA node DRAM_Bound (level 3, BE/Mem): external-memory stalls via the
# MEM_Bound_Ratio helper.
class DRAM_Bound: name = "DRAM_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L3_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['MemoryBound', 'TmaL3mem']) maxval = 1.0 def compute(self, EV): try: self.val = MEM_Bound_Ratio(self, EV, 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DRAM_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance."""
# TMA node MEM_Bandwidth (level 4, BE/Mem): cycles with high outstanding
# demand-read occupancy (ORO_DRD_BW_Cycles helper) per clock.
class MEM_Bandwidth: name = "MEM_Bandwidth" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore']) maxval = None def compute(self, EV): try: self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MEM_Bandwidth zero division") return self.val desc = """ This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). 
The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that).. Improve data accesses to reduce cacheline transfers from/to memory. Examples: 1) Consume all bytes of a each cacheline before it is evicted (e.g. reorder structure elements and split non-hot ones), 2) merge computed-limited with BW-limited loops, 3) NUMA optimizations in multi-socket system. Note: software prefetches will not help BW-limited application.."""
# TMA node MEM_Latency (level 4, BE/Mem): any-outstanding-demand-read cycles
# minus the bandwidth-dominated cycles accounted in MEM_Bandwidth.
class MEM_Latency: name = "MEM_Latency" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore']) maxval = None def compute(self, EV): try: self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MEM_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that).. Improve data accesses or interleave them with compute. 
Examples: 1) Data layout re-structuring, 2) Software Prefetches (also through the compiler).."""
# TMA node Store_Bound (level 3, BE/Mem): cycles bound on RFO stores
# (EXE_ACTIVITY.BOUND_ON_STORES) per clock.
class Store_Bound: name = "Store_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_INST_RETIRED.ALL_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset(['MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = EV("EXE_ACTIVITY.BOUND_ON_STORES", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Bound zero division") return self.val desc = """ This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck."""
# TMA node Store_Latency (level 4, BE/Mem): L1D store-miss handling cycles;
# non-lock fraction of outstanding demand RFOs plus L2 store-hit cycles.
class Store_Latency: name = "Store_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore']) maxval = 1.0 def compute(self, EV): try: self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). 
Consider to avoid/reduce unnecessary (or easily load-able/computable) memory store."""
# TMA node False_Sharing (level 4, BE/Mem): RFOs that snoop-HITM in L3,
# weighted by the HITM cost helper.
class False_Sharing: name = "False_Sharing" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM:pp', 'OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * OCR_all_rfo_l3_hit_snoop_hitm(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "False_Sharing zero division") return self.val desc = """ This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. . False Sharing can be easily avoided by padding to make Logical Processors access different lines."""
# TMA node Split_Stores (level 4, BE/Mem): rate of cache-line-crossing stores
# per core clock (Core_Utilization domain).
class Split_Stores: name = "Split_Stores" domain = "Core_Utilization" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.SPLIT_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("MEM_INST_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Split_Stores zero division") return self.val desc = """ This metric represents rate of split store accesses. 
Consider aligning your data to the 64-byte cache line granularity."""
# TMA node DTLB_Store (level 4, BE/Mem): store DTLB misses - STLB hits weighted
# by Mem_STLB_Hit_Cost plus store page-walk cycles, per core clock.
class DTLB_Store: name = "DTLB_Store" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.STLB_MISS_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMT', 'MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT:c1", 4) + EV("DTLB_STORE_MISSES.WALK_ACTIVE", 4)) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DTLB_Store zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently- used data."""
# TMA node Store_STLB_Hit (level 5, BE/Mem): DTLB_Store minus Store_STLB_Miss.
class Store_STLB_Hit: name = "Store_STLB_Hit" domain = "Clocks_Estimated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = self.DTLB_Store.compute(EV) - self.Store_STLB_Miss.compute(EV) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_STLB_Hit zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second- level TLB (STLB)"""
# TMA node Store_STLB_Miss (level 5, BE/Mem): store page-walk active cycles / CORE_CLKS.
class Store_STLB_Miss: name = "Store_STLB_Miss" domain = "Clocks_Calculated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = EV("DTLB_STORE_MISSES.WALK_ACTIVE", 5) / CORE_CLKS(self, EV, 5) self.thresh = (self.val > 0.05) 
and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_STLB_Miss zero division") return self.val desc = """ This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk"""
# TMA node Core_Bound (level 2, BE/Core): the Backend_Bound remainder after
# Memory_Bound (non-memory backend issues).
class Core_Bound: name = "Core_Bound" domain = "Slots" area = "BE/Core" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Backend', 'TmaL2', 'Compute']) maxval = None def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Core_Bound zero division") return self.val desc = """ This metric represents fraction of slots where Core non- memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).. Tip: consider Port Saturation analysis as next step."""
# TMA node Divider (level 3, BE/Core): cycles the divide/sqrt unit was active.
class Divider: name = "Divider" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = ['ARITH.DIVIDER_ACTIVE'] errcount = 0 sibling = None metricgroup = frozenset(['BvCB']) maxval = 1.0 def compute(self, EV): try: self.val = EV("ARITH.DIVIDER_ACTIVE", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Divider zero division") return self.val desc = """ This metric represents fraction of cycles where the Divider unit was active. 
Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.""" class Serializing_Operation: name = "Serializing_Operation" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = ['PARTIAL_RAT_STALLS.SCOREBOARD'] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'PortsUtil']) maxval = None def compute(self, EV): try: self.val = EV("PARTIAL_RAT_STALLS.SCOREBOARD", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Serializing_Operation zero division") return self.val desc = """ This metric represents fraction of cycles the CPU issue- pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out- of-order execution which may limit performance.""" class Slow_Pause: name = "Slow_Pause" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = ['ROB_MISC_EVENTS.PAUSE_INST'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = 140 * EV("ROB_MISC_EVENTS.PAUSE_INST", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Slow_Pause zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions.""" class Ports_Utilization: name = "Ports_Utilization" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Core_Bound_Cycles(self, EV, 3) / CLKS(self, EV, 3) if (EV("ARITH.DIVIDER_ACTIVE", 3)<(EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) - EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3))) else Few_Uops_Executed_Threshold(self, EV, 3) / CLKS(self, EV, 3) EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3) 
# --- Core port-utilization breakdown (TMA level 4/5 nodes) ---
# Each node computes a fraction-of-cycles value in self.val and a
# significance flag in self.thresh; on a zero denominator it reports
# via handle_error() and leaves self.val at 0.

class Ports_Utilized_0:
    name = "Ports_Utilized_0"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None

    def compute(self, EV):
        # Cycles with zero uops executed on any port, as a share of thread clocks.
        try:
            self.val = EV("EXE_ACTIVITY.EXE_BOUND_0_PORTS", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_0 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed no
uops on any execution port (Logical Processor cycles since
ICL, Physical Core cycles otherwise). Long-latency
instructions like divides may contribute to this metric..
Check assembly view and Appendix C in Optimization Manual
to find out instructions with say 5 or more cycles latency..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""


class Mixing_Vectors:
    name = "Mixing_Vectors"
    domain = "Clocks"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0

    def compute(self, EV):
        # Share of issued uops that are injected blend uops (vector width mismatch).
        try:
            self.val = EV("UOPS_ISSUED.VECTOR_WIDTH_MISMATCH", 5) / EV("UOPS_ISSUED.ANY", 5)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error(self, "Mixing_Vectors zero division")
        return self.val
    desc = """
This metric estimates penalty in terms of percentage of
injected blend uops out of all Uops Issued -- the Count
Domain. Usually a Mixing_Vectors over 5% is worth
investigating. Read more in Appendix B1 of the Optimizations
Guide for this topic."""


class Ports_Utilized_1:
    name = "Ports_Utilized_1"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None

    def compute(self, EV):
        # Cycles with exactly one uop executed across all ports, per core clock.
        try:
            self.val = Cycles_1_Port_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_1 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the CPU
executed total of 1 uop per cycle on all execution ports
(Logical Processor cycles since ICL, Physical Core cycles
otherwise). This can be due to heavy data-dependency among
software instructions; or over oversubscribing a particular
hardware resource. In some other cases with high
1_Port_Utilized and L1_Bound; this metric can point to L1
data-cache latency bottleneck that may not necessarily
manifest with complete execution starvation (due to the
short L1 latency e.g. walking a linked list) - looking at
the assembly can be helpful."""


class Ports_Utilized_2:
    name = "Ports_Utilized_2"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None

    def compute(self, EV):
        # Cycles with exactly two uops executed across all ports, per core clock.
        try:
            self.val = Cycles_2_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_2 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed
total of 2 uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise).
Loop Vectorization -most compilers feature auto-
Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""


class Ports_Utilized_3m:
    name = "Ports_Utilized_3m"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'PortsUtil'])
    maxval = None

    def compute(self, EV):
        # Cycles with three or more uops executed across all ports, per core clock.
        try:
            self.val = Cycles_3m_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.4) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_3m zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed
total of 3 or more uops per cycle on all execution ports
(Logical Processor cycles since ICL, Physical Core cycles
otherwise)."""


class ALU_Op_Utilization:
    name = "ALU_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        # Average utilization of the four ALU-capable ports (0, 1, 5, 6).
        try:
            self.val = (EV("UOPS_DISPATCHED_PORT.PORT_0", 5) + EV("UOPS_DISPATCHED_PORT.PORT_1", 5) + EV("UOPS_DISPATCHED_PORT.PORT_5", 5) + EV("UOPS_DISPATCHED_PORT.PORT_6", 5)) / (4 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.4)
        except ZeroDivisionError:
            handle_error(self, "ALU_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution ports for ALU operations."""
# --- Per-port dispatch utilization (TMA level 5/6 nodes) ---
# Each Port_N node reports uops dispatched on that port as a fraction of
# core clocks; the *_Op_Utilization nodes aggregate related ports.

class Port_0:
    name = "Port_0"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_0']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_0", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_0 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 0 ALU and 2nd branch"""


class Port_1:
    name = "Port_1"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_1']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_1", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_1 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 1 (ALU)"""


class Port_5:
    name = "Port_5"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_5']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_5", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_5 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 5 ALU. See section
'Handling Port 5 Pressure' in Optimization Manual:.
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""


class Port_6:
    name = "Port_6"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_6']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_6", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_6 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 6 Primary Branch and
simple ALU"""


class Load_Op_Utilization:
    name = "Load_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        # Ports 2/3/7 handle loads and store-addresses; PORT_4 (store-data)
        # is subtracted to factor out store-address uops counted on port 7.
        try:
            self.val = (EV("UOPS_DISPATCHED_PORT.PORT_2", 5) + EV("UOPS_DISPATCHED_PORT.PORT_3", 5) + EV("UOPS_DISPATCHED_PORT.PORT_7", 5) - EV("UOPS_DISPATCHED_PORT.PORT_4", 5)) / (2 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Load_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Load operations"""


class Port_2:
    name = "Port_2"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_2']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_2", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_2 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 2 Loads and Store-address"""


class Port_3:
    name = "Port_3"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_3", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_3 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 3 Loads and Store-address"""


class Store_Op_Utilization:
    name = "Store_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 5) / CORE_CLKS(self, EV, 5)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Store_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Store operations"""


class Port_4:
    name = "Port_4"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_4']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_4 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 4 (Store-data)"""


class Port_7:
    name = "Port_7"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_7']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_7", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_7 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 7 simple Store-address"""
# --- Retiring branch of the TMA tree (levels 1-5) ---
# Sibling node objects (self.Heavy_Operations, self.Retiring, ...) are
# attached to each instance by the surrounding framework before compute()
# is called.

class Retiring:
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.RETIRE_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None

    def compute(self, EV):
        # Fraction of issue slots that retired useful work.
        try:
            self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """
This category represents fraction of slots utilized by
useful work i.e. issued uops that eventually get retired.
Ideally; all pipeline slots would be attributed to the
Retiring category. Retiring of 100% would indicate the
maximum Pipeline_Width throughput was achieved. Maximizing
Retiring typically increases the Instructions-per-cycle
(see IPC metric). Note that a high Retiring value does not
necessary mean there is no room for more performance. For
example; Heavy-operations or Microcode Assists are
categorized under Retiring. They often indicate suboptimal
performance and can often be optimized or avoided. . A high
Retiring value for non-vectorized code may be a good hint
for programmer to consider vectorizing his code. Doing so
essentially lets more computations be done without
significantly increasing number of instructions thus
improving the performance."""


class Light_Operations:
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        # Light operations are whatever retired that was not heavy-weight.
        try:
            self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring light-weight operations -- instructions that
require no more than one uop (micro-operation). This
correlates with total number of instructions used by the
program. A uops-per-instruction (see UopPI metric) ratio of
1 or less should be expected for decently optimized code
running on Intel Core/Xeon products. While this often
indicates efficient X86 instructions were executed; high
value does not necessarily mean better performance cannot
be achieved. . Focus on techniques that reduce instruction
count or result in more efficient instructions generation
such as vectorization."""


class FP_Arith:
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None

    def compute(self, EV):
        # Sum of the x87 / scalar / vector FP sub-nodes.
        try:
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """
This metric represents overall arithmetic floating-point
(FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use
of \"Uops\" CountDomain and FMA double-counting."""


class X87_Use:
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None

    def compute(self, EV):
        # Scale the Retiring fraction by the x87 share of executed uops.
        try:
            self.val = self.Retiring.compute(EV) * EV("UOPS_EXECUTED.X87", 4) / EV("UOPS_EXECUTED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """
This metric serves as an approximation of legacy x87 usage.
It accounts for instructions beyond X87 FP arithmetic
operations; hence may be used as a thermometer to avoid X87
high usage and preferably upgrade to modern ISA. See Tip
under Tuning Hint.. Tip: consider compiler flags to
generate newer AVX (or SSE) instruction sets; which
typically perform better and feature vectors."""


class FP_Scalar:
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
scalar uops fraction the CPU has retired. May overcount due
to FMA double counting.. Investigate what limits (compiler)
generation of vector code."""


class FP_Vector:
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0

    def compute(self, EV):
        try:
            self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
vector uops fraction the CPU has retired aggregated across
all vector widths. May overcount due to FMA double
counting.. Check if vector width is expected"""


class FP_Vector_128b:
    name = "FP_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0

    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_128b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 128-bit wide vectors. May overcount
due to FMA double counting..
Try to exploit wider vector length"""
# --- Retiring sub-tree continued (levels 2-4) ---
# Fix: the Few_Uops_Instructions help text was garbled ("instructions
# that that are decoder into") -- corrected to "instructions that are
# decoded into". All computations are unchanged.

class FP_Vector_256b:
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0

    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting..
Try to exploit wider vector length"""


class Memory_Operations:
    name = "Memory_Operations"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None

    def compute(self, EV):
        # Light_Operations scaled by the memory-access share of instructions.
        try:
            self.val = self.Light_Operations.compute(EV) * EV("MEM_INST_RETIRED.ANY", 3) / EV("INST_RETIRED.ANY", 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring memory operations -- uops for memory load or store
accesses."""


class Fused_Instructions:
    name = "Fused_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("UOPS_RETIRED.MACRO_FUSED", 3) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fused_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring fused instructions -- where one uop can represent
multiple contiguous instructions. CMP+JCC or DEC+JCC are
common examples of legacy fusions. {}. See section
'Optimizing for Macro-fusion' in Optimization Manual:"""


class Non_Fused_Branches:
    name = "Non_Fused_Branches"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None

    def compute(self, EV):
        # Branches retired minus the macro-fused ones.
        try:
            self.val = self.Light_Operations.compute(EV) * (EV("BR_INST_RETIRED.ALL_BRANCHES", 3) - EV("UOPS_RETIRED.MACRO_FUSED", 3)) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Non_Fused_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring branch instructions that were not fused. Non-
conditional branches like direct JMP or CALL would count
here. Can be used to examine fusible conditional jumps that
were not fused."""


class Other_Light_Ops:
    name = "Other_Light_Ops"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None

    def compute(self, EV):
        # Residual: light ops not attributed to any sibling; clamped at 0.
        try:
            self.val = max(0 , self.Light_Operations.compute(EV) - Light_Ops_Sum(self, EV, 3))
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Light_Ops zero division")
        return self.val
    desc = """
This metric represents the remaining light uops fraction
the CPU has executed - remaining means not covered by other
sibling nodes. May undercount due to FMA double counting"""


class Nop_Instructions:
    name = "Nop_Instructions"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = ['INST_RETIRED.NOP']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvBO', 'Pipeline'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.NOP", 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Nop_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring NOP (no op) instructions. Compilers often use NOPs
for certain address alignments - e.g. start address of a
function or loop body.. Improve Codegen by correctly
placing NOPs outside hot sections (e.g. outside loop body)."""


class Heavy_Operations:
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        # Slots spent on multi-uop instructions: retired uops (plus fused
        # ones) in excess of retired instructions.
        try:
            self.val = (Retired_Slots(self, EV, 2) + EV("UOPS_RETIRED.MACRO_FUSED", 2) - EV("INST_RETIRED.ANY", 2)) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that
require two or more uops or micro-coded sequences. This
highly-correlates with the uop length of these
instructions/sequences."""


class Few_Uops_Instructions:
    name = "Few_Uops_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        # Heavy operations that did not come from the microcode sequencer.
        try:
            self.val = self.Heavy_Operations.compute(EV) - self.Microcode_Sequencer.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Few_Uops_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring instructions that are decoded into two or up to
four uops. This highly-correlates with the number of uops
in such instructions."""


class Microcode_Sequencer:
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = ['IDQ.MS_UOPS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None

    def compute(self, EV):
        try:
            self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was
retiring uops fetched by the Microcode Sequencer (MS) unit.
The MS is used for CISC instructions not supported by the
default decoders (like repeat move strings; or CPUID); or
by microcode assists used to address some operation modes
(like in Floating Point assists). These cases can often be
avoided.."""
# --- MS-assist leaf nodes and the first Info.Bottleneck metrics ---

class Assists:
    name = "Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 4
    htoff = False
    sample = ['OTHER_ASSISTS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO'])
    maxval = 1.0

    def compute(self, EV):
        # Assist events weighted by the fixed per-assist cost constant.
        try:
            self.val = Avg_Assist_Cost *(EV("FP_ASSIST.ANY", 4) + EV("OTHER_ASSISTS.ANY", 4)) / SLOTS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired
uops delivered by the Microcode_Sequencer as a result of
Assists. Assists are long sequences of uops that are
required in certain corner-cases for operations that cannot
be handled natively by the execution pipeline. For example;
when working with very small floating point values (so-
called Denormals); the FP units are not set up to perform
these operations natively. Instead; a sequence of
instructions to perform the computation on the Denormals is
injected into the pipeline. Since these microcode sequences
might be dozens of uops long; Assists can be extremely
deleterious to performance and they can be avoided in many
cases."""


class FP_Assists:
    name = "FP_Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None

    def compute(self, EV):
        # 34 is the estimated uop cost of one FP assist on this core.
        try:
            self.val = 34 * EV("FP_ASSIST.ANY", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "FP_Assists zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of slots the CPU
retired uops as a result of handing Floating Point (FP)
Assists. FP Assist may apply when working with very small
floating point values (so-called Denormals).. Consider DAZ
(Denormals Are Zero) and/or FTZ (Flush To Zero) options in
your compiler; \"-ffast-math\" with -O2 in GCC for example.
This option may improve performance if the denormal values
are not critical in your application. Also note that the
DAZ and FTZ modes are not compatible with the IEEE Standard
754..
https://www.intel.com/content/www/us/en/develop/docume
ntation/vtune-help/top/reference/cpu-metrics-reference/bad-
speculation-back-end-bound-pipeline-slots/fp-assists.html"""


class CISC:
    name = "CISC"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        # MS slots not explained by assists; clamped at 0.
        try:
            self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "CISC zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU retired
uops originated from CISC (complex instruction set computer)
instruction. A CISC instruction has multiple uops that are
required to perform the instruction's functionality as in
the case of read-modify-write as an example. Since these
instructions require multiple uops they may or may not
imply sub-optimal use of machine resources."""


class Metric_Mispredictions:
    name = "Mispredictions"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts', 'BvMP'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Mispredictions(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Mispredictions zero division")
    desc = """
Total pipeline cost of Branch Misprediction related
bottlenecks"""


class Metric_Big_Code:
    name = "Big_Code"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBC', 'BigFootprint', 'Fed', 'Frontend', 'IcMiss', 'MemoryTLB'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Big_Code(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Big_Code zero division")
    desc = """
Total pipeline cost of instruction fetch related
bottlenecks by large code footprint programs (i-side cache;
TLB and BTB misses)"""


class Metric_Instruction_Fetch_BW:
    name = "Instruction_Fetch_BW"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvFB', 'Fed', 'FetchBW', 'Frontend'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Instruction_Fetch_BW(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Instruction_Fetch_BW zero division")
    desc = """
Total pipeline cost of instruction fetch bandwidth related
bottlenecks (when the front-end could not sustain operations
delivery to the back-end)"""


class Metric_Cache_Memory_Bandwidth:
    name = "Cache_Memory_Bandwidth"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMB', 'Mem', 'MemoryBW', 'Offcore'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Cache_Memory_Bandwidth(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Bandwidth zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Bandwidth
related bottlenecks"""
# --- Info.Bottleneck scaled-slot cost metrics (continued) ---
# Each wraps one file-level aggregation helper; compute() stores the
# result in self.val and has no return value (metrics, not tree nodes).

class Metric_Cache_Memory_Latency:
    name = "Cache_Memory_Latency"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvML', 'Mem', 'MemoryLat', 'Offcore'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Cache_Memory_Latency(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Latency zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Latency
related bottlenecks"""


class Metric_Memory_Data_TLBs:
    name = "Memory_Data_TLBs"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMT', 'Mem', 'MemoryTLB', 'Offcore'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Memory_Data_TLBs(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Data_TLBs zero division")
    desc = """
Total pipeline cost of Memory Address Translation related
bottlenecks (data-side TLBs)"""


class Metric_Memory_Synchronization:
    name = "Memory_Synchronization"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMS', 'Mem', 'Offcore'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Memory_Synchronization(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Synchronization zero division")
    desc = """
Total pipeline cost of Memory Synchronization related
bottlenecks (data transfers and coherency updates across
processors)"""


class Metric_Compute_Bound_Est:
    name = "Compute_Bound_Est"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvCB', 'Cor'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Compute_Bound_Est(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Compute_Bound_Est zero division")
    desc = """
Total pipeline cost when the execution is compute-bound -
an estimation. Covers Core Bound when High ILP as well as
when long-latency execution units are busy."""


class Metric_Irregular_Overhead:
    name = "Irregular_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BvIO', 'Cor', 'Ret'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Irregular_Overhead(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Irregular_Overhead zero division")
    desc = """
Total pipeline cost of irregular execution (e.g. FP-assists
in HPC, Wait time with work imbalance multithreaded
workloads, overhead in system services or virtualized
environments)"""


class Metric_Other_Bottlenecks:
    name = "Other_Bottlenecks"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvOB', 'Cor', 'Offcore'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Other_Bottlenecks(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Other_Bottlenecks zero division")
    desc = """
Total pipeline cost of remaining bottlenecks in the back-
end. Examples include data-dependencies (Core Bound when
Low ILP) and other unlisted memory-related stalls."""


class Metric_Branching_Overhead:
    name = "Branching_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBO', 'Ret'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Branching_Overhead(self, EV, 0)
            self.thresh = (self.val > 5)
        except ZeroDivisionError:
            handle_error_metric(self, "Branching_Overhead zero division")
    desc = """
Total pipeline cost of instructions used for program
control-flow - a subset of the Retiring category in TMA.
Examples include function calls; loops and alignments. (A
lower bound). Consider Loop Unrolling or function inlining
optimizations"""


class Metric_Useful_Work:
    name = "Useful_Work"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvUW', 'Ret'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Useful_Work(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Useful_Work zero division")
    desc = """
Total pipeline cost of \"useful operations\" - the portion
of Retiring category not covered by Branching_Overhead nor
Irregular_Overhead."""


class Metric_Core_Bound_Likely:
    name = "Core_Bound_Likely"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Botlnk.L0"
    metricgroup = frozenset(['Cor', 'SMT'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Core_Bound_Likely(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Bound_Likely zero division")
    desc = """
Probability of Core Bound bottleneck hidden by SMT-profiling
artifacts. Tip: consider analysis with SMT disabled"""
# --- Info.Thread / Info.Core informational metrics ---
# Thin wrappers around the file-level ratio helpers (IPC, CPI, ...).

class Metric_IPC:
    name = "IPC"
    domain = "Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Ret', 'Summary'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = IPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IPC zero division")
    desc = """
Instructions Per Cycle (per Logical Processor)"""


class Metric_UopPI:
    name = "UopPI"
    domain = "Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = UopPI(self, EV, 0)
            self.thresh = (self.val > 1.05)
        except ZeroDivisionError:
            handle_error_metric(self, "UopPI zero division")
    desc = """
Uops Per Instruction"""


class Metric_UpTB:
    name = "UpTB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = UpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 1.5
        except ZeroDivisionError:
            handle_error_metric(self, "UpTB zero division")
    desc = """
Uops per taken branch"""


class Metric_CPI:
    name = "CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Mem'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPI zero division")
    desc = """
Cycles Per Instruction (per Logical Processor)"""


class Metric_CLKS:
    name = "CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CLKS zero division")
    desc = """
Per-Logical Processor actual clocks when the Logical
Processor is active."""


class Metric_SLOTS:
    name = "SLOTS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['TmaL1'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = SLOTS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SLOTS zero division")
    desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""


class Metric_Execute_per_Issue:
    name = "Execute_per_Issue"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Cor', 'Pipeline'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Execute_per_Issue(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute_per_Issue zero division")
    desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests
high rate of uop micro-fusions. Ratio < 1 suggest high rate
of \"execute\" at rename stage."""


class Metric_CoreIPC:
    name = "CoreIPC"
    domain = "Core_Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = CoreIPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CoreIPC zero division")
    desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""


class Metric_FLOPc:
    name = "FLOPc"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'Flops'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = FLOPc(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FLOPc zero division")
    desc = """
Floating Point Operations Per Cycle"""


class Metric_FP_Arith_Utilization:
    name = "FP_Arith_Utilization"
    domain = "Core_Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = FP_Arith_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FP_Arith_Utilization zero division")
    desc = """
Actual per-core usage of the Floating Point non-X87
execution units (regardless of precision or vector-width).
Values > 1 are possible due to Fused-Multiply Add FMA
counting - common."""
zero division") desc = """ Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to Fused-Multiply Add FMA counting - common.""" class Metric_ILP: name = "ILP" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Core" metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil']) sibling = None def compute(self, EV): try: self.val = ILP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ILP zero division") desc = """ Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical- processor)""" class Metric_EPC: name = "EPC" domain = "Metric" maxval = 20.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = EPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "EPC zero division") desc = """ uops Executed per Cycle""" class Metric_CORE_CLKS: name = "CORE_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = CORE_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CORE_CLKS zero division") desc = """ Core actual clocks when any Logical Processor is active on the Physical Core""" class Metric_IpLoad: name = "IpLoad" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpLoad(self, EV, 0) self.thresh = (self.val < 3) except ZeroDivisionError: handle_error_metric(self, "IpLoad zero division") desc = """ Instructions per Load (lower number means higher occurrence rate). 
Tip: reduce memory accesses.""" class Metric_IpStore: name = "IpStore" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpStore(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpStore zero division") desc = """ Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses.""" class Metric_IpBranch: name = "IpBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpBranch(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpBranch zero division") desc = """ Instructions per Branch (lower number means higher occurrence rate)""" class Metric_IpCall: name = "IpCall" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpCall(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpCall zero division") desc = """ Instructions per (near) call (lower number means higher occurrence rate)""" class Metric_IpTB: name = "IpTB" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 2 + 1 except ZeroDivisionError: handle_error_metric(self, "IpTB zero division") desc = """ Instructions per taken branch""" class Metric_BpTkBranch: name = "BpTkBranch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = BpTkBranch(self, EV, 0) self.thresh = 
True except ZeroDivisionError: handle_error_metric(self, "BpTkBranch zero division") desc = """ Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes.""" class Metric_IpFLOP: name = "IpFLOP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpFLOP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpFLOP zero division") desc = """ Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408""" class Metric_IpArith: name = "IpArith" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith zero division") desc = """ Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.""" class Metric_IpArith_Scalar_SP: name = "IpArith_Scalar_SP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpScalar', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_Scalar_SP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_Scalar_SP zero division") desc = """ Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). 
Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_Scalar_DP: name = "IpArith_Scalar_DP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpScalar', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_Scalar_DP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_Scalar_DP zero division") desc = """ Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_AVX128: name = "IpArith_AVX128" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX128(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX128 zero division") desc = """ Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_AVX256: name = "IpArith_AVX256" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX256(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX256 zero division") desc = """ Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). 
Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpPause: name = "IpPause" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpPause(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpPause zero division") desc = """ Instructions per PAUSE (lower number means higher occurrence rate)""" class Metric_IpSWPF: name = "IpSWPF" domain = "Inst_Metric" maxval = 1000 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Prefetches']) sibling = None def compute(self, EV): try: self.val = IpSWPF(self, EV, 0) self.thresh = (self.val < 100) except ZeroDivisionError: handle_error_metric(self, "IpSWPF zero division") desc = """ Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)""" class Metric_Instructions: name = "Instructions" domain = "Count" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Summary', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = Instructions(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Instructions zero division") desc = """ Total number of retired Instructions""" class Metric_Retire: name = "Retire" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Pipeline', 'Ret']) sibling = None def compute(self, EV): try: self.val = Retire(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Retire zero division") desc = """ Average number of Uops retired in cycles where at least one uop has retired.""" class Metric_IpAssist: name = "IpAssist" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = 
IpAssist(self, EV, 0) self.thresh = (self.val < 100000) except ZeroDivisionError: handle_error_metric(self, "IpAssist zero division") desc = """ Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)""" class Metric_Execute: name = "Execute" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT']) sibling = None def compute(self, EV): try: self.val = Execute(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Execute zero division") desc = """ """ class Metric_Fetch_LSD: name = "Fetch_LSD" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_LSD(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_LSD zero division") desc = """ Average number of uops fetched from LSD per cycle""" class Metric_Fetch_DSB: name = "Fetch_DSB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_DSB(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_DSB zero division") desc = """ Average number of uops fetched from DSB per cycle""" class Metric_Fetch_MITE: name = "Fetch_MITE" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_MITE(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_MITE zero division") desc = """ Average number of uops fetched from MITE per cycle""" class Metric_Fetch_UpC: name = "Fetch_UpC" domain = "Metric" maxval = 6.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def 
compute(self, EV): try: self.val = Fetch_UpC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_UpC zero division") desc = """ Average number of Uops issued by front-end when it issued something""" class Metric_LSD_Coverage: name = "LSD_Coverage" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'LSD']) sibling = None def compute(self, EV): try: self.val = LSD_Coverage(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "LSD_Coverage zero division") desc = """ Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)""" class Metric_DSB_Coverage: name = "DSB_Coverage" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSB', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Coverage(self, EV, 0) self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1) except ZeroDivisionError: handle_error_metric(self, "DSB_Coverage zero division") desc = """ Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. 
http://www.intel.com/content/www/us/en/architecture- and-technology/64-ia-32-architectures-optimization- manual.html""" class Metric_DSB_Switch_Cost: name = "DSB_Switch_Cost" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss']) sibling = None def compute(self, EV): try: self.val = DSB_Switch_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DSB_Switch_Cost zero division") desc = """ Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.""" class Metric_DSB_Misses: name = "DSB_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = DSB_Misses(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "DSB_Misses zero division") desc = """ Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.""" class Metric_DSB_Bandwidth: name = "DSB_Bandwidth" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['DSB', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Bandwidth(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "DSB_Bandwidth zero division") desc = """ Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck.""" class Metric_ICache_Miss_Latency: name = "ICache_Miss_Latency" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = ICache_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ICache_Miss_Latency zero division") desc = """ Average Latency for L1 instruction cache misses""" class Metric_IC_Misses: name = 
"IC_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = IC_Misses(self, EV, 0) self.thresh = (self.val > 5) except ZeroDivisionError: handle_error_metric(self, "IC_Misses zero division") desc = """ Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.""" class Metric_IpDSB_Miss_Ret: name = "IpDSB_Miss_Ret" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = IpDSB_Miss_Ret(self, EV, 0) self.thresh = (self.val < 50) except ZeroDivisionError: handle_error_metric(self, "IpDSB_Miss_Ret zero division") desc = """ Instructions per non-speculative DSB miss (lower number means higher occurrence rate)""" class Metric_IpUnknown_Branch: name = "IpUnknown_Branch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed']) sibling = None def compute(self, EV): try: self.val = IpUnknown_Branch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpUnknown_Branch zero division") desc = """ Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)""" class Metric_L2MPKI_Code: name = "L2MPKI_Code" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code zero division") desc = """ L2 cache true code cacheline misses per kilo instruction""" class Metric_L2MPKI_Code_All: name = "L2MPKI_Code_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code_All(self, EV, 0) 
self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code_All zero division") desc = """ L2 cache speculative code cacheline misses per kilo instruction""" class Metric_IpMispredict: name = "IpMispredict" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)""" class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = (self.val < 1000) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc = """ Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).""" class Metric_Branch_Misprediction_Cost: name = "Branch_Misprediction_Cost" domain = "Core_Metric" maxval = 300 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Branch_Misprediction_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Misprediction_Cost zero division") desc = """ Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)""" class Metric_Spec_Clears_Ratio: name = "Spec_Clears_Ratio" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Spec_Clears_Ratio(self, EV, 0) self.thresh = True 
except ZeroDivisionError: handle_error_metric(self, "Spec_Clears_Ratio zero division") desc = """ Speculative to Retired ratio of all clears (covering Mispredicts and nukes)""" class Metric_Cond_NT: name = "Cond_NT" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_NT(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cond_NT zero division") desc = """ Fraction of branches that are non-taken conditionals""" class Metric_Cond_TK: name = "Cond_TK" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_TK(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cond_TK zero division") desc = """ Fraction of branches that are taken conditionals""" class Metric_CallRet: name = "CallRet" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = CallRet(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CallRet zero division") desc = """ Fraction of branches that are CALL or RET""" class Metric_Jump: name = "Jump" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = Jump(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Jump zero division") desc = """ Fraction of branches that are unconditional (direct or indirect) jumps""" class Metric_Load_Miss_Real_Latency: name = "Load_Miss_Real_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat']) sibling = None def 
compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Miss_Real_Latency zero division") desc = """ Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)""" class Metric_MLP: name = "MLP" domain = "Metric" maxval = 10.0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MLP zero division") desc = """ Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)""" class Metric_L1MPKI: name = "L1MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI zero division") desc = """ L1 cache true misses per kilo instruction for retired demand loads""" class Metric_L1MPKI_Load: name = "L1MPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI_Load zero division") desc = """ L1 cache true misses per kilo instruction for all demand loads (including speculative)""" class Metric_L2MPKI: name = "L2MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'Backend', 'CacheHits']) sibling = None def compute(self, EV): try: self.val = L2MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI zero division") desc = """ L2 cache true misses per kilo instruction for retired demand loads""" class 
Metric_L2MPKI_All: name = "L2MPKI_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_All zero division") desc = """ L2 cache misses per kilo instruction for all request types (including speculative)""" class Metric_L2MPKI_Load: name = "L2MPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Load zero division") desc = """ L2 cache misses per kilo instruction for all demand loads (including speculative)""" class Metric_L2MPKI_RFO: name = "L2MPKI_RFO" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheMisses', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_RFO(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_RFO zero division") desc = """ Offcore requests (L2 cache miss) per kilo instruction for demand RFOs""" class Metric_L2HPKI_All: name = "L2HPKI_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2HPKI_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2HPKI_All zero division") desc = """ L2 cache hits per kilo instruction for all request types (including speculative)""" class Metric_L2HPKI_Load: name = "L2HPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2HPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: 
handle_error_metric(self, "L2HPKI_Load zero division") desc = """ L2 cache hits per kilo instruction for all demand loads (including speculative)""" class Metric_L3MPKI: name = "L3MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = L3MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3MPKI zero division") desc = """ L3 cache true misses per kilo instruction for retired demand loads""" class Metric_FB_HPKI: name = "FB_HPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = FB_HPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FB_HPKI zero division") desc = """ Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss- handling entries)""" class Metric_L1D_Cache_Fill_BW: name = "L1D_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW zero division") desc = """ """ class Metric_L2_Cache_Fill_BW: name = "L2_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Fill_BW: name = "L3_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW(self, EV, 0) self.thresh = 
True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Access_BW: name = "L3_Cache_Access_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW zero division") desc = """ """ class Metric_Page_Walks_Utilization: name = "Page_Walks_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Page_Walks_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Page_Walks_Utilization zero division") desc = """ Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses""" class Metric_Code_STLB_MPKI: name = "Code_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Fed', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Code_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Code_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Load_STLB_MPKI: name = "Load_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Load_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class 
Metric_Store_STLB_MPKI: name = "Store_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Store_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Store_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_L1D_Cache_Fill_BW_2T: name = "L1D_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L1 data cache [GB / sec]""" class Metric_L2_Cache_Fill_BW_2T: name = "L2_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L2 cache [GB / sec]""" class Metric_L3_Cache_Fill_BW_2T: name = "L3_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L3 cache [GB / sec]""" class Metric_L3_Cache_Access_BW_2T: name = "L3_Cache_Access_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" 
metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW_2T zero division") desc = """ Average per-core data access bandwidth to the L3 cache [GB / sec]""" class Metric_Load_L2_Miss_Latency: name = "Load_L2_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_Miss_Latency zero division") desc = """ Average Latency for L2 cache miss demand Loads""" class Metric_Load_L2_MLP: name = "Load_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_MLP zero division") desc = """ Average Parallel L2 cache miss demand Loads""" class Metric_Data_L2_MLP: name = "Data_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Data_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Data_L2_MLP zero division") desc = """ Average Parallel L2 cache miss data reads""" class Metric_UC_Load_PKI: name = "UC_Load_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = UC_Load_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "UC_Load_PKI zero division") desc = """ Un-cacheable retired load per kilo instruction""" class 
Metric_CPU_Utilization: name = "CPU_Utilization" domain = "Metric" maxval = 1 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'Summary']) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization (percentage)""" class Metric_CPUs_Utilized: name = "CPUs_Utilized" domain = "Metric" maxval = 300 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = CPUs_Utilized(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPUs_Utilized zero division") desc = """ Average number of utilized CPUs""" class Metric_Core_Frequency: name = "Core_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary', 'Power']) sibling = None def compute(self, EV): try: self.val = Core_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Core_Frequency zero division") desc = """ Measured Average Core Frequency for unhalted processors [GHz]""" class Metric_GFLOPs: name = "GFLOPs" domain = "Metric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Cor', 'Flops', 'HPC']) sibling = None def compute(self, EV): try: self.val = GFLOPs(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "GFLOPs zero division") desc = """ Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_SMT_2T_Utilization: name = "SMT_2T_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = SMT_2T_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SMT_2T_Utilization zero division") desc = """ Fraction of cycles where both hardware Logical Processors were active""" class Metric_Kernel_Utilization: name = "Kernel_Utilization" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in the Operating System (OS) Kernel mode""" class Metric_Kernel_CPI: name = "Kernel_CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Kernel_CPI zero division") desc = """ Cycles Per Instruction for the Operating System (OS) Kernel mode""" class Metric_DRAM_BW_Use: name = "DRAM_BW_Use" domain = "GB/sec" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC']) sibling 
= None def compute(self, EV): try: self.val = DRAM_BW_Use(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DRAM_BW_Use zero division") desc = """ Average external Memory Bandwidth Use for reads and writes [GB / sec]""" class Metric_MEM_Read_Latency: name = "MEM_Read_Latency" domain = "NanoSeconds" maxval = 1000 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryLat', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Read_Latency zero division") desc = """ Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches.""" class Metric_MEM_Parallel_Reads: name = "MEM_Parallel_Reads" domain = "SystemMetric" maxval = 100 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Parallel_Reads(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Parallel_Reads zero division") desc = """ Average number of parallel data read requests to external memory. 
Accounts for demand loads and L1/L2 prefetches""" class Metric_Power: name = "Power" domain = "SystemMetric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power', 'SoC']) sibling = None def compute(self, EV): try: self.val = Power(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Power zero division") desc = """ Total package Power in Watts""" class Metric_Time: name = "Time" domain = "Seconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = Time(self, EV, 0) self.thresh = (self.val < 1) except ZeroDivisionError: handle_error_metric(self, "Time zero division") desc = """ Run duration time in seconds""" class Metric_Socket_CLKS: name = "Socket_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Socket_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Socket_CLKS zero division") desc = """ Socket actual clocks when any core is active on that socket""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Branches', 'OS']) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) self.thresh = (self.val < 1000000) except ZeroDivisionError: handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]""" # Schedule class Setup: def __init__(self, r): o = dict() n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = 
Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n n = LCP() ; r.run(n) ; o["LCP"] = n n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n n = MITE() ; r.run(n) ; o["MITE"] = n n = Decoder0_Alone() ; r.run(n) ; o["Decoder0_Alone"] = n n = DSB() ; r.run(n) ; o["DSB"] = n n = LSD() ; r.run(n) ; o["LSD"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Other_Mispredicts() ; r.run(n) ; o["Other_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Other_Nukes() ; r.run(n) ; o["Other_Nukes"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n n = Load_STLB_Hit() ; r.run(n) ; o["Load_STLB_Hit"] = n n = Load_STLB_Miss() ; r.run(n) ; o["Load_STLB_Miss"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = L1_Hit_Latency() ; r.run(n) ; o["L1_Hit_Latency"] = n n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n n = FB_Full() ; r.run(n) ; o["FB_Full"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n n = MEM_Latency() ; 
r.run(n) ; o["MEM_Latency"] = n n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n n = Store_STLB_Hit() ; r.run(n) ; o["Store_STLB_Hit"] = n n = Store_STLB_Miss() ; r.run(n) ; o["Store_STLB_Miss"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Divider() ; r.run(n) ; o["Divider"] = n n = Serializing_Operation() ; r.run(n) ; o["Serializing_Operation"] = n n = Slow_Pause() ; r.run(n) ; o["Slow_Pause"] = n n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n n = Mixing_Vectors() ; r.run(n) ; o["Mixing_Vectors"] = n n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n n = Port_0() ; r.run(n) ; o["Port_0"] = n n = Port_1() ; r.run(n) ; o["Port_1"] = n n = Port_5() ; r.run(n) ; o["Port_5"] = n n = Port_6() ; r.run(n) ; o["Port_6"] = n n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n n = Port_2() ; r.run(n) ; o["Port_2"] = n n = Port_3() ; r.run(n) ; o["Port_3"] = n n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n n = Port_4() ; r.run(n) ; o["Port_4"] = n n = Port_7() ; r.run(n) ; o["Port_7"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n n = X87_Use() ; r.run(n) ; o["X87_Use"] = n n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n n = Memory_Operations() ; r.run(n) ; 
o["Memory_Operations"] = n n = Fused_Instructions() ; r.run(n) ; o["Fused_Instructions"] = n n = Non_Fused_Branches() ; r.run(n) ; o["Non_Fused_Branches"] = n n = Other_Light_Ops() ; r.run(n) ; o["Other_Light_Ops"] = n n = Nop_Instructions() ; r.run(n) ; o["Nop_Instructions"] = n n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n n = Few_Uops_Instructions() ; r.run(n) ; o["Few_Uops_Instructions"] = n n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n n = Assists() ; r.run(n) ; o["Assists"] = n n = FP_Assists() ; r.run(n) ; o["FP_Assists"] = n n = CISC() ; r.run(n) ; o["CISC"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Resteers"].parent = o["Fetch_Latency"] o["Mispredicts_Resteers"].parent = o["Branch_Resteers"] o["Clears_Resteers"].parent = o["Branch_Resteers"] o["Unknown_Branches"].parent = o["Branch_Resteers"] o["MS_Switches"].parent = o["Fetch_Latency"] o["LCP"].parent = o["Fetch_Latency"] o["DSB_Switches"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["MITE"].parent = o["Fetch_Bandwidth"] o["Decoder0_Alone"].parent = o["MITE"] o["DSB"].parent = o["Fetch_Bandwidth"] o["LSD"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Other_Mispredicts"].parent = o["Branch_Mispredicts"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Other_Nukes"].parent = o["Machine_Clears"] o["Memory_Bound"].parent = o["Backend_Bound"] o["L1_Bound"].parent = o["Memory_Bound"] o["DTLB_Load"].parent = o["L1_Bound"] o["Load_STLB_Hit"].parent = o["DTLB_Load"] o["Load_STLB_Miss"].parent = o["DTLB_Load"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["L1_Hit_Latency"].parent = o["L1_Bound"] o["Lock_Latency"].parent = o["L1_Bound"] o["Split_Loads"].parent = o["L1_Bound"] o["G4K_Aliasing"].parent = o["L1_Bound"] o["FB_Full"].parent = o["L1_Bound"] o["L2_Bound"].parent = 
o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["Contested_Accesses"].parent = o["L3_Bound"] o["Data_Sharing"].parent = o["L3_Bound"] o["L3_Hit_Latency"].parent = o["L3_Bound"] o["SQ_Full"].parent = o["L3_Bound"] o["DRAM_Bound"].parent = o["Memory_Bound"] o["MEM_Bandwidth"].parent = o["DRAM_Bound"] o["MEM_Latency"].parent = o["DRAM_Bound"] o["Store_Bound"].parent = o["Memory_Bound"] o["Store_Latency"].parent = o["Store_Bound"] o["False_Sharing"].parent = o["Store_Bound"] o["Split_Stores"].parent = o["Store_Bound"] o["DTLB_Store"].parent = o["Store_Bound"] o["Store_STLB_Hit"].parent = o["DTLB_Store"] o["Store_STLB_Miss"].parent = o["DTLB_Store"] o["Core_Bound"].parent = o["Backend_Bound"] o["Divider"].parent = o["Core_Bound"] o["Serializing_Operation"].parent = o["Core_Bound"] o["Slow_Pause"].parent = o["Serializing_Operation"] o["Ports_Utilization"].parent = o["Core_Bound"] o["Ports_Utilized_0"].parent = o["Ports_Utilization"] o["Mixing_Vectors"].parent = o["Ports_Utilized_0"] o["Ports_Utilized_1"].parent = o["Ports_Utilization"] o["Ports_Utilized_2"].parent = o["Ports_Utilization"] o["Ports_Utilized_3m"].parent = o["Ports_Utilization"] o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_0"].parent = o["ALU_Op_Utilization"] o["Port_1"].parent = o["ALU_Op_Utilization"] o["Port_5"].parent = o["ALU_Op_Utilization"] o["Port_6"].parent = o["ALU_Op_Utilization"] o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_2"].parent = o["Load_Op_Utilization"] o["Port_3"].parent = o["Load_Op_Utilization"] o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_4"].parent = o["Store_Op_Utilization"] o["Port_7"].parent = o["Store_Op_Utilization"] o["Light_Operations"].parent = o["Retiring"] o["FP_Arith"].parent = o["Light_Operations"] o["X87_Use"].parent = o["FP_Arith"] o["FP_Scalar"].parent = o["FP_Arith"] o["FP_Vector"].parent = o["FP_Arith"] o["FP_Vector_128b"].parent = o["FP_Vector"] o["FP_Vector_256b"].parent = o["FP_Vector"] 
o["Memory_Operations"].parent = o["Light_Operations"] o["Fused_Instructions"].parent = o["Light_Operations"] o["Non_Fused_Branches"].parent = o["Light_Operations"] o["Other_Light_Ops"].parent = o["Light_Operations"] o["Nop_Instructions"].parent = o["Other_Light_Ops"] o["Heavy_Operations"].parent = o["Retiring"] o["Few_Uops_Instructions"].parent = o["Heavy_Operations"] o["Microcode_Sequencer"].parent = o["Heavy_Operations"] o["Assists"].parent = o["Microcode_Sequencer"] o["FP_Assists"].parent = o["Assists"] o["CISC"].parent = o["Microcode_Sequencer"] # user visible metrics n = Metric_Mispredictions() ; r.metric(n) ; o["Mispredictions"] = n n = Metric_Big_Code() ; r.metric(n) ; o["Big_Code"] = n n = Metric_Instruction_Fetch_BW() ; r.metric(n) ; o["Instruction_Fetch_BW"] = n n = Metric_Cache_Memory_Bandwidth() ; r.metric(n) ; o["Cache_Memory_Bandwidth"] = n n = Metric_Cache_Memory_Latency() ; r.metric(n) ; o["Cache_Memory_Latency"] = n n = Metric_Memory_Data_TLBs() ; r.metric(n) ; o["Memory_Data_TLBs"] = n n = Metric_Memory_Synchronization() ; r.metric(n) ; o["Memory_Synchronization"] = n n = Metric_Compute_Bound_Est() ; r.metric(n) ; o["Compute_Bound_Est"] = n n = Metric_Irregular_Overhead() ; r.metric(n) ; o["Irregular_Overhead"] = n n = Metric_Other_Bottlenecks() ; r.metric(n) ; o["Other_Bottlenecks"] = n n = Metric_Branching_Overhead() ; r.metric(n) ; o["Branching_Overhead"] = n n = Metric_Useful_Work() ; r.metric(n) ; o["Useful_Work"] = n n = Metric_Core_Bound_Likely() ; r.metric(n) ; o["Core_Bound_Likely"] = n n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n n = Metric_FLOPc() ; r.metric(n) ; 
o["FLOPc"] = n n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n n = Metric_EPC() ; r.metric(n) ; o["EPC"] = n n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n n = Metric_IpPause() ; r.metric(n) ; o["IpPause"] = n n = Metric_IpSWPF() ; r.metric(n) ; o["IpSWPF"] = n n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n n = Metric_IpAssist() ; r.metric(n) ; o["IpAssist"] = n n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n n = Metric_Fetch_LSD() ; r.metric(n) ; o["Fetch_LSD"] = n n = Metric_Fetch_DSB() ; r.metric(n) ; o["Fetch_DSB"] = n n = Metric_Fetch_MITE() ; r.metric(n) ; o["Fetch_MITE"] = n n = Metric_Fetch_UpC() ; r.metric(n) ; o["Fetch_UpC"] = n n = Metric_LSD_Coverage() ; r.metric(n) ; o["LSD_Coverage"] = n n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n n = Metric_DSB_Switch_Cost() ; r.metric(n) ; o["DSB_Switch_Cost"] = n n = Metric_DSB_Misses() ; r.metric(n) ; o["DSB_Misses"] = n n = Metric_DSB_Bandwidth() ; r.metric(n) ; o["DSB_Bandwidth"] = n n = Metric_ICache_Miss_Latency() ; r.metric(n) ; o["ICache_Miss_Latency"] = n n = Metric_IC_Misses() ; r.metric(n) ; o["IC_Misses"] = n n = 
Metric_IpDSB_Miss_Ret() ; r.metric(n) ; o["IpDSB_Miss_Ret"] = n n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n n = Metric_L2MPKI_Code() ; r.metric(n) ; o["L2MPKI_Code"] = n n = Metric_L2MPKI_Code_All() ; r.metric(n) ; o["L2MPKI_Code_All"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_Branch_Misprediction_Cost() ; r.metric(n) ; o["Branch_Misprediction_Cost"] = n n = Metric_Spec_Clears_Ratio() ; r.metric(n) ; o["Spec_Clears_Ratio"] = n n = Metric_Cond_NT() ; r.metric(n) ; o["Cond_NT"] = n n = Metric_Cond_TK() ; r.metric(n) ; o["Cond_TK"] = n n = Metric_CallRet() ; r.metric(n) ; o["CallRet"] = n n = Metric_Jump() ; r.metric(n) ; o["Jump"] = n n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n n = Metric_L1MPKI_Load() ; r.metric(n) ; o["L1MPKI_Load"] = n n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n n = Metric_L2HPKI_All() ; r.metric(n) ; o["L2HPKI_All"] = n n = Metric_L2HPKI_Load() ; r.metric(n) ; o["L2HPKI_Load"] = n n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n n = Metric_FB_HPKI() ; r.metric(n) ; o["FB_HPKI"] = n n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n n = Metric_L3_Cache_Access_BW() ; r.metric(n) ; o["L3_Cache_Access_BW"] = n n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n n = Metric_Code_STLB_MPKI() ; r.metric(n) ; o["Code_STLB_MPKI"] = n n = Metric_Load_STLB_MPKI() ; r.metric(n) ; o["Load_STLB_MPKI"] = n n = 
Metric_Store_STLB_MPKI() ; r.metric(n) ; o["Store_STLB_MPKI"] = n n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Access_BW_2T() ; r.metric(n) ; o["L3_Cache_Access_BW_2T"] = n n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n n = Metric_UC_Load_PKI() ; r.metric(n) ; o["UC_Load_PKI"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n n = Metric_Power() ; r.metric(n) ; o["Power"] = n n = Metric_Time() ; r.metric(n) ; o["Time"] = n n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n # references between groups o["Branch_Resteers"].Unknown_Branches = o["Unknown_Branches"] o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Other_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Other_Mispredicts"].Branch_Mispredicts = 
o["Branch_Mispredicts"] o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Nukes"].Machine_Clears = o["Machine_Clears"] o["Other_Nukes"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Nukes"].Bad_Speculation = o["Bad_Speculation"] o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Retiring = o["Retiring"] o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Backend_Bound = o["Backend_Bound"] o["Load_STLB_Hit"].Load_STLB_Miss = o["Load_STLB_Miss"] o["Load_STLB_Hit"].DTLB_Load = o["DTLB_Load"] o["DRAM_Bound"].L2_Bound = o["L2_Bound"] o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Store_STLB_Hit"].DTLB_Store = o["DTLB_Store"] o["Store_STLB_Hit"].Store_STLB_Miss = o["Store_STLB_Miss"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Core_Bound"].Retiring = o["Retiring"] o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Ports_Utilization"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Ports_Utilization"].Retiring = o["Retiring"] o["Retiring"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Retiring = o["Retiring"] o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"] o["FP_Arith"].Retiring = o["Retiring"] o["FP_Arith"].FP_Scalar = o["FP_Scalar"] o["FP_Arith"].X87_Use = o["X87_Use"] o["FP_Arith"].FP_Vector = o["FP_Vector"] o["X87_Use"].Retiring = o["Retiring"] o["Memory_Operations"].Retiring = o["Retiring"] o["Memory_Operations"].Light_Operations = o["Light_Operations"] o["Memory_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Fused_Instructions"].Retiring = o["Retiring"] o["Fused_Instructions"].Light_Operations = o["Light_Operations"] o["Fused_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Non_Fused_Branches"].Retiring = o["Retiring"] o["Non_Fused_Branches"].Light_Operations = o["Light_Operations"] 
o["Non_Fused_Branches"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].Light_Operations = o["Light_Operations"] o["Other_Light_Ops"].Retiring = o["Retiring"] o["Other_Light_Ops"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].Fused_Instructions = o["Fused_Instructions"] o["Other_Light_Ops"].Non_Fused_Branches = o["Non_Fused_Branches"] o["Other_Light_Ops"].FP_Vector = o["FP_Vector"] o["Other_Light_Ops"].FP_Scalar = o["FP_Scalar"] o["Other_Light_Ops"].FP_Arith = o["FP_Arith"] o["Other_Light_Ops"].X87_Use = o["X87_Use"] o["Other_Light_Ops"].Memory_Operations = o["Memory_Operations"] o["Nop_Instructions"].Retiring = o["Retiring"] o["Nop_Instructions"].Light_Operations = o["Light_Operations"] o["Nop_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Assists = o["Assists"] o["Mispredictions"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Mispredictions"].LCP = o["LCP"] o["Mispredictions"].Other_Mispredicts = o["Other_Mispredicts"] o["Mispredictions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Mispredictions"].DSB_Switches = o["DSB_Switches"] o["Mispredictions"].Branch_Resteers = o["Branch_Resteers"] o["Mispredictions"].ICache_Misses = o["ICache_Misses"] o["Mispredictions"].MS_Switches = o["MS_Switches"] o["Mispredictions"].Bad_Speculation = o["Bad_Speculation"] o["Mispredictions"].ITLB_Misses = o["ITLB_Misses"] o["Mispredictions"].Unknown_Branches = o["Unknown_Branches"] o["Mispredictions"].Fetch_Latency = o["Fetch_Latency"] o["Mispredictions"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Big_Code"].LCP = o["LCP"] o["Big_Code"].ICache_Misses = o["ICache_Misses"] o["Big_Code"].DSB_Switches = o["DSB_Switches"] o["Big_Code"].Branch_Resteers = o["Branch_Resteers"] o["Big_Code"].MS_Switches = o["MS_Switches"] 
o["Big_Code"].ITLB_Misses = o["ITLB_Misses"] o["Big_Code"].Unknown_Branches = o["Unknown_Branches"] o["Big_Code"].Fetch_Latency = o["Fetch_Latency"] o["Instruction_Fetch_BW"].Heavy_Operations = o["Heavy_Operations"] o["Instruction_Fetch_BW"].ICache_Misses = o["ICache_Misses"] o["Instruction_Fetch_BW"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Instruction_Fetch_BW"].Frontend_Bound = o["Frontend_Bound"] o["Instruction_Fetch_BW"].Bad_Speculation = o["Bad_Speculation"] o["Instruction_Fetch_BW"].ITLB_Misses = o["ITLB_Misses"] o["Instruction_Fetch_BW"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Instruction_Fetch_BW"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Instruction_Fetch_BW"].LCP = o["LCP"] o["Instruction_Fetch_BW"].Other_Mispredicts = o["Other_Mispredicts"] o["Instruction_Fetch_BW"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Instruction_Fetch_BW"].DSB_Switches = o["DSB_Switches"] o["Instruction_Fetch_BW"].Assists = o["Assists"] o["Instruction_Fetch_BW"].Branch_Resteers = o["Branch_Resteers"] o["Instruction_Fetch_BW"].Clears_Resteers = o["Clears_Resteers"] o["Instruction_Fetch_BW"].MS_Switches = o["MS_Switches"] o["Instruction_Fetch_BW"].Unknown_Branches = o["Unknown_Branches"] o["Instruction_Fetch_BW"].Fetch_Latency = o["Fetch_Latency"] o["Cache_Memory_Bandwidth"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Bandwidth"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Bandwidth"].G4K_Aliasing = o["G4K_Aliasing"] o["Cache_Memory_Bandwidth"].Retiring = o["Retiring"] o["Cache_Memory_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Cache_Memory_Bandwidth"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Bandwidth"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Bandwidth"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Bandwidth"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Bandwidth"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Bandwidth"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Bandwidth"].L3_Bound = o["L3_Bound"] 
o["Cache_Memory_Bandwidth"].FB_Full = o["FB_Full"] o["Cache_Memory_Bandwidth"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Bandwidth"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Cache_Memory_Bandwidth"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Bandwidth"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Bandwidth"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Bandwidth"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Bandwidth"].Backend_Bound = o["Backend_Bound"] o["Cache_Memory_Bandwidth"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Bandwidth"].DRAM_Bound = o["DRAM_Bound"] o["Cache_Memory_Latency"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Latency"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Latency"].False_Sharing = o["False_Sharing"] o["Cache_Memory_Latency"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Latency"].Retiring = o["Retiring"] o["Cache_Memory_Latency"].Frontend_Bound = o["Frontend_Bound"] o["Cache_Memory_Latency"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Latency"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Latency"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Latency"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Latency"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Latency"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Latency"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Latency"].FB_Full = o["FB_Full"] o["Cache_Memory_Latency"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Latency"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Cache_Memory_Latency"].DTLB_Store = o["DTLB_Store"] o["Cache_Memory_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Latency"].Store_Latency = o["Store_Latency"] o["Cache_Memory_Latency"].Split_Stores = o["Split_Stores"] o["Cache_Memory_Latency"].G4K_Aliasing = o["G4K_Aliasing"] o["Cache_Memory_Latency"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Latency"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Latency"].Backend_Bound = o["Backend_Bound"] 
o["Cache_Memory_Latency"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Latency"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Data_TLBs"].L1_Bound = o["L1_Bound"] o["Memory_Data_TLBs"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Memory_Data_TLBs"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Memory_Data_TLBs"].DTLB_Load = o["DTLB_Load"] o["Memory_Data_TLBs"].Store_Latency = o["Store_Latency"] o["Memory_Data_TLBs"].G4K_Aliasing = o["G4K_Aliasing"] o["Memory_Data_TLBs"].Retiring = o["Retiring"] o["Memory_Data_TLBs"].Split_Stores = o["Split_Stores"] o["Memory_Data_TLBs"].False_Sharing = o["False_Sharing"] o["Memory_Data_TLBs"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Data_TLBs"].DTLB_Store = o["DTLB_Store"] o["Memory_Data_TLBs"].L2_Bound = o["L2_Bound"] o["Memory_Data_TLBs"].Memory_Bound = o["Memory_Bound"] o["Memory_Data_TLBs"].Lock_Latency = o["Lock_Latency"] o["Memory_Data_TLBs"].Backend_Bound = o["Backend_Bound"] o["Memory_Data_TLBs"].Store_Bound = o["Store_Bound"] o["Memory_Data_TLBs"].Split_Loads = o["Split_Loads"] o["Memory_Data_TLBs"].L3_Bound = o["L3_Bound"] o["Memory_Data_TLBs"].FB_Full = o["FB_Full"] o["Memory_Data_TLBs"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Synchronization"].L1_Bound = o["L1_Bound"] o["Memory_Synchronization"].False_Sharing = o["False_Sharing"] o["Memory_Synchronization"].Retiring = o["Retiring"] o["Memory_Synchronization"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Synchronization"].Data_Sharing = o["Data_Sharing"] o["Memory_Synchronization"].Contested_Accesses = o["Contested_Accesses"] o["Memory_Synchronization"].L2_Bound = o["L2_Bound"] o["Memory_Synchronization"].Memory_Bound = o["Memory_Bound"] o["Memory_Synchronization"].SQ_Full = o["SQ_Full"] o["Memory_Synchronization"].Store_Bound = o["Store_Bound"] o["Memory_Synchronization"].Bad_Speculation = o["Bad_Speculation"] o["Memory_Synchronization"].L3_Bound = o["L3_Bound"] o["Memory_Synchronization"].DTLB_Store = o["DTLB_Store"] o["Memory_Synchronization"].Branch_Mispredicts = 
o["Branch_Mispredicts"] o["Memory_Synchronization"].Store_Latency = o["Store_Latency"] o["Memory_Synchronization"].Split_Stores = o["Split_Stores"] o["Memory_Synchronization"].Machine_Clears = o["Machine_Clears"] o["Memory_Synchronization"].Backend_Bound = o["Backend_Bound"] o["Memory_Synchronization"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Memory_Synchronization"].Other_Nukes = o["Other_Nukes"] o["Memory_Synchronization"].DRAM_Bound = o["DRAM_Bound"] o["Compute_Bound_Est"].Serializing_Operation = o["Serializing_Operation"] o["Compute_Bound_Est"].Ports_Utilization = o["Ports_Utilization"] o["Compute_Bound_Est"].Retiring = o["Retiring"] o["Compute_Bound_Est"].Frontend_Bound = o["Frontend_Bound"] o["Compute_Bound_Est"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Compute_Bound_Est"].Memory_Bound = o["Memory_Bound"] o["Compute_Bound_Est"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Compute_Bound_Est"].Core_Bound = o["Core_Bound"] o["Compute_Bound_Est"].Backend_Bound = o["Backend_Bound"] o["Compute_Bound_Est"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Compute_Bound_Est"].Divider = o["Divider"] o["Compute_Bound_Est"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Heavy_Operations = o["Heavy_Operations"] o["Irregular_Overhead"].Ports_Utilization = o["Ports_Utilization"] o["Irregular_Overhead"].Retiring = o["Retiring"] o["Irregular_Overhead"].ICache_Misses = o["ICache_Misses"] o["Irregular_Overhead"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Irregular_Overhead"].Frontend_Bound = o["Frontend_Bound"] o["Irregular_Overhead"].Serializing_Operation = o["Serializing_Operation"] o["Irregular_Overhead"].Core_Bound = o["Core_Bound"] o["Irregular_Overhead"].Bad_Speculation = o["Bad_Speculation"] o["Irregular_Overhead"].ITLB_Misses = o["ITLB_Misses"] o["Irregular_Overhead"].Divider = o["Divider"] o["Irregular_Overhead"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Irregular_Overhead"].Memory_Bound = o["Memory_Bound"] 
o["Irregular_Overhead"].Machine_Clears = o["Machine_Clears"] o["Irregular_Overhead"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Irregular_Overhead"].LCP = o["LCP"] o["Irregular_Overhead"].Other_Mispredicts = o["Other_Mispredicts"] o["Irregular_Overhead"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Irregular_Overhead"].DSB_Switches = o["DSB_Switches"] o["Irregular_Overhead"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Assists = o["Assists"] o["Irregular_Overhead"].Backend_Bound = o["Backend_Bound"] o["Irregular_Overhead"].Branch_Resteers = o["Branch_Resteers"] o["Irregular_Overhead"].Clears_Resteers = o["Clears_Resteers"] o["Irregular_Overhead"].MS_Switches = o["MS_Switches"] o["Irregular_Overhead"].Other_Nukes = o["Other_Nukes"] o["Irregular_Overhead"].Unknown_Branches = o["Unknown_Branches"] o["Irregular_Overhead"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].Retiring = o["Retiring"] o["Other_Bottlenecks"].Data_Sharing = o["Data_Sharing"] o["Other_Bottlenecks"].L2_Bound = o["L2_Bound"] o["Other_Bottlenecks"].Contested_Accesses = o["Contested_Accesses"] o["Other_Bottlenecks"].L3_Bound = o["L3_Bound"] o["Other_Bottlenecks"].Machine_Clears = o["Machine_Clears"] o["Other_Bottlenecks"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Other_Bottlenecks"].Store_Latency = o["Store_Latency"] o["Other_Bottlenecks"].Other_Mispredicts = o["Other_Mispredicts"] o["Other_Bottlenecks"].DSB_Switches = o["DSB_Switches"] o["Other_Bottlenecks"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Other_Bottlenecks"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Other_Bottlenecks"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Other_Bottlenecks"].DTLB_Load = o["DTLB_Load"] o["Other_Bottlenecks"].ICache_Misses = o["ICache_Misses"] o["Other_Bottlenecks"].Memory_Bound = o["Memory_Bound"] o["Other_Bottlenecks"].SQ_Full = o["SQ_Full"] o["Other_Bottlenecks"].Store_Bound = o["Store_Bound"] o["Other_Bottlenecks"].Bad_Speculation = o["Bad_Speculation"] 
o["Other_Bottlenecks"].FB_Full = o["FB_Full"] o["Other_Bottlenecks"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Other_Bottlenecks"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Other_Bottlenecks"].Split_Stores = o["Split_Stores"] o["Other_Bottlenecks"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Other_Bottlenecks"].Other_Nukes = o["Other_Nukes"] o["Other_Bottlenecks"].Unknown_Branches = o["Unknown_Branches"] o["Other_Bottlenecks"].DRAM_Bound = o["DRAM_Bound"] o["Other_Bottlenecks"].L1_Bound = o["L1_Bound"] o["Other_Bottlenecks"].G4K_Aliasing = o["G4K_Aliasing"] o["Other_Bottlenecks"].Core_Bound = o["Core_Bound"] o["Other_Bottlenecks"].Divider = o["Divider"] o["Other_Bottlenecks"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Other_Bottlenecks"].Assists = o["Assists"] o["Other_Bottlenecks"].Backend_Bound = o["Backend_Bound"] o["Other_Bottlenecks"].Branch_Resteers = o["Branch_Resteers"] o["Other_Bottlenecks"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Other_Bottlenecks"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Other_Bottlenecks"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].Ports_Utilization = o["Ports_Utilization"] o["Other_Bottlenecks"].False_Sharing = o["False_Sharing"] o["Other_Bottlenecks"].Heavy_Operations = o["Heavy_Operations"] o["Other_Bottlenecks"].Frontend_Bound = o["Frontend_Bound"] o["Other_Bottlenecks"].Serializing_Operation = o["Serializing_Operation"] o["Other_Bottlenecks"].MEM_Latency = o["MEM_Latency"] o["Other_Bottlenecks"].Split_Loads = o["Split_Loads"] o["Other_Bottlenecks"].ITLB_Misses = o["ITLB_Misses"] o["Other_Bottlenecks"].DTLB_Store = o["DTLB_Store"] o["Other_Bottlenecks"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Bottlenecks"].LCP = o["LCP"] o["Other_Bottlenecks"].Lock_Latency = o["Lock_Latency"] o["Other_Bottlenecks"].Clears_Resteers = o["Clears_Resteers"] o["Other_Bottlenecks"].MS_Switches = o["MS_Switches"] o["Other_Bottlenecks"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Useful_Work"].Retiring = 
o["Retiring"] o["Useful_Work"].Heavy_Operations = o["Heavy_Operations"] o["Useful_Work"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Useful_Work"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Useful_Work"].Assists = o["Assists"] o["Core_Bound_Likely"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Core_Bound_Likely"].Ports_Utilization = o["Ports_Utilization"] o["Core_Bound_Likely"].Retiring = o["Retiring"] o["Core_Bound_Likely"].Frontend_Bound = o["Frontend_Bound"] o["Core_Bound_Likely"].Memory_Bound = o["Memory_Bound"] o["Core_Bound_Likely"].Core_Bound = o["Core_Bound"] o["Core_Bound_Likely"].Backend_Bound = o["Backend_Bound"] o["DSB_Misses"].LSD = o["LSD"] o["DSB_Misses"].MITE = o["MITE"] o["DSB_Misses"].LCP = o["LCP"] o["DSB_Misses"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Misses"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Misses"].DSB_Switches = o["DSB_Switches"] o["DSB_Misses"].Branch_Resteers = o["Branch_Resteers"] o["DSB_Misses"].ICache_Misses = o["ICache_Misses"] o["DSB_Misses"].MS_Switches = o["MS_Switches"] o["DSB_Misses"].ITLB_Misses = o["ITLB_Misses"] o["DSB_Misses"].DSB = o["DSB"] o["DSB_Misses"].Unknown_Branches = o["Unknown_Branches"] o["DSB_Misses"].Fetch_Latency = o["Fetch_Latency"] o["DSB_Bandwidth"].LSD = o["LSD"] o["DSB_Bandwidth"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Bandwidth"].MITE = o["MITE"] o["DSB_Bandwidth"].DSB = o["DSB"] o["DSB_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].LCP = o["LCP"] o["IC_Misses"].MS_Switches = o["MS_Switches"] o["IC_Misses"].ICache_Misses = o["ICache_Misses"] o["IC_Misses"].ITLB_Misses = o["ITLB_Misses"] o["IC_Misses"].Unknown_Branches = o["Unknown_Branches"] o["IC_Misses"].DSB_Switches = o["DSB_Switches"] o["IC_Misses"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].Branch_Mispredicts = o["Branch_Mispredicts"] 
o["Branch_Misprediction_Cost"].LCP = o["LCP"] o["Branch_Misprediction_Cost"].Other_Mispredicts = o["Other_Mispredicts"] o["Branch_Misprediction_Cost"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Branch_Misprediction_Cost"].DSB_Switches = o["DSB_Switches"] o["Branch_Misprediction_Cost"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].ICache_Misses = o["ICache_Misses"] o["Branch_Misprediction_Cost"].MS_Switches = o["MS_Switches"] o["Branch_Misprediction_Cost"].Bad_Speculation = o["Bad_Speculation"] o["Branch_Misprediction_Cost"].ITLB_Misses = o["ITLB_Misses"] o["Branch_Misprediction_Cost"].Unknown_Branches = o["Unknown_Branches"] o["Branch_Misprediction_Cost"].Fetch_Latency = o["Fetch_Latency"] o["Branch_Misprediction_Cost"].Mispredicts_Resteers = o["Mispredicts_Resteers"] # siblings cross-tree o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],) o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],) o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Serializing_Operation"], o["Mixing_Vectors"], o["Microcode_Sequencer"],) o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],) o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],) o["Decoder0_Alone"].sibling = (o["Few_Uops_Instructions"],) o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],) o["Machine_Clears"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"], o["Microcode_Sequencer"],) o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],) o["DTLB_Load"].sibling = (o["DTLB_Store"],) o["Lock_Latency"].sibling = (o["Store_Latency"],) o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"],) o["Contested_Accesses"].sibling = 
(o["Machine_Clears"], o["Data_Sharing"], o["False_Sharing"],) o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["False_Sharing"],) o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],) o["L3_Hit_Latency"].overlap = True o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],) o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],) o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],) o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],) o["Store_Latency"].overlap = True o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"],) o["Split_Stores"].sibling = (o["Port_4"],) o["DTLB_Store"].sibling = (o["DTLB_Load"],) o["Serializing_Operation"].sibling = (o["MS_Switches"],) o["Mixing_Vectors"].sibling = (o["MS_Switches"],) o["Ports_Utilized_1"].sibling = (o["L1_Bound"],) o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Port_4"].sibling = (o["Split_Stores"],) o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector_128b"], 
o["FP_Vector_256b"],) o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"],) o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"],) o["Few_Uops_Instructions"].sibling = (o["Decoder0_Alone"],) o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],) o["Mispredictions"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["Cache_Memory_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],) o["Cache_Memory_Latency"].sibling = (o["L3_Hit_Latency"], o["MEM_Latency"],) o["Memory_Data_TLBs"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Memory_Synchronization"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Irregular_Overhead"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],) o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Misses"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["Branch_Misprediction_Cost"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
230,237
Python
.py
5,343
37.149916
1,761
0.655937
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,902
objutils.py
andikleen_pmu-tools/objutils.py
# generic utilities for objects


def has(obj, name):
    """Return True if *name* is defined directly on obj's class.

    Only the class's own __dict__ is consulted, so inherited class
    attributes and per-instance attributes are deliberately ignored.
    """
    return name in obj.__class__.__dict__


def safe_ref(obj, name):
    """Return the class attribute *name*, or None if the class lacks it."""
    # Single .get() lookup instead of the original LBYL has()-then-index
    # double lookup; mappingproxy supports .get, and a missing key yields
    # None exactly like the original fallthrough did.
    return obj.__class__.__dict__.get(name)


def ref_or(obj, name, alt):
    """Return the class attribute *name*, or *alt* if the class lacks it."""
    return obj.__class__.__dict__.get(name, alt)


def map_fields(obj, fields):
    """Return a list of safe_ref(obj, f) for every f in *fields*."""
    # Comprehension replaces the original nested helper + list(map(...)).
    return [safe_ref(obj, f) for f in fields]
445
Python
.py
15
24.933333
43
0.629108
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,903
event-rmap.py
andikleen_pmu-tools/event-rmap.py
#!/usr/bin/env python3 # print currently running events on cpu (default 0) # event-rmap [cpu-num] # xxx no extra modi for now, racy with multi plexing from __future__ import print_function import sys import msr import ocperf from pmudef import (MSR_PEBS_ENABLE, MSR_EVNTSEL, EVENTSEL_ENABLE, EVMASK, EVENTSEL_CMASK, EVENTSEL_EDGE, EVENTSEL_ANY, EVENTSEL_INV, EVENTSEL_PC, MSR_IA32_FIXED_CTR_CTRL) fixednames = ( "inst_retired.any", "cpu_clk_unhalted.thread", "cpu_clk_unhalted.ref_tsc" ) cpu = 0 if len(sys.argv) > 1: cpu = int(sys.argv[1]) emap = ocperf.find_emap() if not emap: print("Unknown CPU or cannot find CPU event table") found = 0 try: pebs_enable = msr.readmsr(MSR_PEBS_ENABLE, cpu) except OSError: pebs_enable = 0 for i in range(0, 8): try: evsel = msr.readmsr(MSR_EVNTSEL + i, cpu) except OSError: break found += 1 if evsel & EVENTSEL_ENABLE: print("%d: %016x: " % (i, evsel), end="") evsel &= EVMASK if emap is None: name = "r%04x", evsel & 0xffff elif evsel in emap.codes: ev = emap.codes[evsel] if ev.msr: try: extra = msr.readmsr(ev.msr) except OSError: print("Cannot read extra MSR %x for %s" % (ev.msr, ev.name)) continue for j in emap.codes.keys(): if j == evsel and extra == emap.codes[j].msrvalue: print(j.name, "msr:%x" % (extra), end="") break else: print("no exact match for %s, msr %x value %x" % (ev.name, ev.msr, ev.msrvalue), end="") else: print(ev.name, end="") else: name = "" for j in emap.codes.keys(): if j & 0xff == evsel & 0xff: name += "%s[%x] " % (emap.codes[j].name, j) if name: print("[no exact match] " + name, end=" ") else: print("r%x" % (evsel), end=" ") if evsel & EVENTSEL_CMASK: print("cmask=%x" % (evsel >> 24), end=" ") if evsel & EVENTSEL_EDGE: print("edge=1", end=" ") if evsel & EVENTSEL_ANY: print("any=1", end=" ") if evsel & EVENTSEL_INV: print("inv=1", end=" ") if evsel & EVENTSEL_PC: print("pc=1", end=" ") if pebs_enable & (1 << i): print("precise=1", end=" ") print() if found == 0: print("Cannot read any MSRs") try: fixed = 
msr.readmsr(MSR_IA32_FIXED_CTR_CTRL) except OSError: print("Cannot read fixed counter MSR") fixed = 0 for i in range(0, 2): if fixed & (1 << (i*4)): print("fixed %d: %s" % (i, fixednames[i]))
2,961
Python
.py
88
23.761364
99
0.502614
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,904
adl_grt_ratios.py
andikleen_pmu-tools/adl_grt_ratios.py
# -*- coding: latin-1 -*- # # auto generated TopDown/TMA 3.51 description for Intel 12th gen Core (code name Alderlake) with GraceMont # Please see http://ark.intel.com for more details on these CPUs. # # References: # http://bit.ly/tma-ispass14 # http://halobates.de/blog/p/262 # https://sites.google.com/site/analysismethods/yasin-pubs # https://download.01.org/perfmon/ # https://github.com/andikleen/pmu-tools/wiki/toplev-manual # # Helpers print_error = lambda msg: False version = "3.51" base_frequency = -1.0 Memory = 0 Average_Frequency = 0.0 use_aux = False def handle_error(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 obj.thresh = False def handle_error_metric(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 # Constants # Aux. formulas # pipeline allocation width def Pipeline_Width(self, EV, level): return 5 def MEM_BOUND_STALLS_AT_RET_CORRECTION(self, EV, level): return max((EV("MEM_BOUND_STALLS.LOAD", level) - EV("LD_HEAD.L1_MISS_AT_RET", level)) / CLKS(self, EV, level) , 0 ) def CLKS(self, EV, level): return EV("CPU_CLK_UNHALTED.CORE", level) def CLKS_P(self, EV, level): return EV("CPU_CLK_UNHALTED.CORE_P", level) def SLOTS(self, EV, level): return Pipeline_Width(self, EV, level) * CLKS(self, EV, level) # Instructions Per Cycle def IPC(self, EV, level): return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level) # Cycles Per Instruction def CPI(self, EV, level): return CLKS(self, EV, level) / EV("INST_RETIRED.ANY", level) # Uops Per Instruction def UPI(self, EV, level): return EV("UOPS_RETIRED.ALL", level) / EV("INST_RETIRED.ANY", level) # Percentage of total non-speculative loads with a store forward or unknown store address block def Store_Fwd_Blocks(self, EV, level): return 100 * EV("LD_BLOCKS.DATA_UNKNOWN", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Percentage of total non-speculative loads with a address aliasing block def Address_Alias_Blocks(self, EV, level): return 100 * EV("LD_BLOCKS.4K_ALIAS", level) / 
EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Percentage of total non-speculative loads that are splits def Load_Splits(self, EV, level): return 100 * EV("MEM_UOPS_RETIRED.SPLIT_LOADS", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Instructions per Branch (lower number means higher occurrence rate) def IpBranch(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level) # Instruction per (near) call (lower number means higher occurrence rate) def IpCall(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.CALL", level) # Instructions per Load def IpLoad(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Instructions per Store def IpStore(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level) # Instructions per retired Branch Misprediction def IpMispredict(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) # Instructions per retired conditional Branch Misprediction where the branch was not taken def IpMisp_Cond_Ntaken(self, EV, level): return EV("INST_RETIRED.ANY", level) / (EV("BR_MISP_RETIRED.COND", level) - EV("BR_MISP_RETIRED.COND_TAKEN", level)) # Instructions per retired conditional Branch Misprediction where the branch was taken def IpMisp_Cond_Taken(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_TAKEN", level) # Instructions per retired return Branch Misprediction def IpMisp_Ret(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.RETURN", level) # Instructions per retired indirect call or jump Branch Misprediction def IpMisp_Indirect(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.INDIRECT", level) # Instructions per Far Branch def IpFarBranch(self, EV, level): return EV("INST_RETIRED.ANY", level) / (EV("BR_INST_RETIRED.FAR_BRANCH", level) / 2 ) # Ratio of all branches 
which mispredict def Branch_Mispredict_Ratio(self, EV, level): return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level) # Ratio between Mispredicted branches and unknown branches def Branch_Mispredict_to_Unknown_Branch_Ratio(self, EV, level): return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / EV("BACLEARS.ANY", level) # Percentage of all uops which are ucode ops def Microcode_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.MS", level) / EV("UOPS_RETIRED.ALL", level) # Percentage of all uops which are FPDiv uops def FPDiv_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.FPDIV", level) / EV("UOPS_RETIRED.ALL", level) # Percentage of all uops which are IDiv uops def IDiv_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.IDIV", level) / EV("UOPS_RETIRED.ALL", level) # Percentage of all uops which are x87 uops def X87_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.X87", level) / EV("UOPS_RETIRED.ALL", level) # Average Frequency Utilization relative nominal frequency def Turbo_Utilization(self, EV, level): return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level) # Fraction of cycles spent in Kernel mode def Kernel_Utilization(self, EV, level): return EV("CPU_CLK_UNHALTED.CORE:sup", level) / EV("CPU_CLK_UNHALTED.CORE", level) # Average CPU Utilization def CPU_Utilization(self, EV, level): return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0) # Cycle cost per L2 hit def Cycles_per_Demand_Load_L2_Hit(self, EV, level): return EV("MEM_BOUND_STALLS.LOAD_L2_HIT", level) / EV("MEM_LOAD_UOPS_RETIRED.L2_HIT", level) # Cycle cost per LLC hit def Cycles_per_Demand_Load_L3_Hit(self, EV, level): return EV("MEM_BOUND_STALLS.LOAD_LLC_HIT", level) / EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) # Cycle cost per DRAM hit def Cycles_per_Demand_Load_DRAM_Hit(self, EV, level): return EV("MEM_BOUND_STALLS.LOAD_DRAM_HIT", level) / EV("MEM_LOAD_UOPS_RETIRED.DRAM_HIT", level) # Percent of instruction 
miss cost that hit in the L2 def Inst_Miss_Cost_L2Hit_Percent(self, EV, level): return 100 * EV("MEM_BOUND_STALLS.IFETCH_L2_HIT", level) / (EV("MEM_BOUND_STALLS.IFETCH", level)) # Percent of instruction miss cost that hit in the L3 def Inst_Miss_Cost_L3Hit_Percent(self, EV, level): return 100 * EV("MEM_BOUND_STALLS.IFETCH_LLC_HIT", level) / (EV("MEM_BOUND_STALLS.IFETCH", level)) # Percent of instruction miss cost that hit in DRAM def Inst_Miss_Cost_DRAMHit_Percent(self, EV, level): return 100 * EV("MEM_BOUND_STALLS.IFETCH_DRAM_HIT", level) / (EV("MEM_BOUND_STALLS.IFETCH", level)) # load ops retired per 1000 instruction def MemLoadPKI(self, EV, level): return 1000 * EV("MEM_UOPS_RETIRED.ALL_LOADS", level) / EV("INST_RETIRED.ANY", level) # Event groups class Frontend_Bound: name = "Frontend_Bound" domain = "Slots" area = "FE" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.ALL", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Frontend_Bound zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to frontend stalls.""" class Fetch_Latency: name = "Fetch_Latency" domain = "Slots" area = "FE" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.FRONTEND_LATENCY", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Fetch_Latency zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.""" class ICache_Misses: name = "ICache_Misses" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None 
metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.ICACHE", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "ICache_Misses zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.""" class ITLB_Misses: name = "ITLB_Misses" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.ITLB", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "ITLB_Misses zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.""" class Branch_Detect: name = "Branch_Detect" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.BRANCH_DETECT", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Branch_Detect zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. 
Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.""" class Branch_Resteer: name = "Branch_Resteer" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.BRANCH_RESTEER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Branch_Resteer zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.""" class Fetch_Bandwidth: name = "Fetch_Bandwidth" domain = "Slots" area = "FE" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Fetch_Bandwidth zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.""" class Cisc: name = "Cisc" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.CISC", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Cisc zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).""" class Decode: name = "Decode" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = 
EV("TOPDOWN_FE_BOUND.DECODE", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Decode zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to decode stalls.""" class Predecode: name = "Predecode" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.PREDECODE", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Predecode zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.""" class Other_FB: name = "Other_FB" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.OTHER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Other_FB zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.""" class Bad_Speculation: name = "Bad_Speculation" domain = "Slots" area = "BAD" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (SLOTS(self, EV, 1) - (EV("TOPDOWN_FE_BOUND.ALL", 1) + EV("TOPDOWN_BE_BOUND.ALL", 1) + EV("TOPDOWN_RETIRING.ALL", 1))) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Bad_Speculation zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. 
Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.""" class Branch_Mispredicts: name = "Branch_Mispredicts" domain = "Slots" area = "BAD" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.MISPREDICT", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Branch_Mispredicts zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to branch mispredicts.""" class Machine_Clears: name = "Machine_Clears" domain = "Slots" area = "BAD" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Machine_Clears zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.""" class Nuke: name = "Nuke" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.NUKE", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Nuke zero division") return self.val desc = """ Counts the 
number of issue slots that were not consumed by the backend due to a machine clear (slow nuke).""" class SMC: name = "SMC" domain = "Count" area = "BAD" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Nuke.compute(EV) * (EV("MACHINE_CLEARS.SMC", 4) / EV("MACHINE_CLEARS.SLOW", 4)) self.thresh = (self.val > 0.02) except ZeroDivisionError: handle_error(self, "SMC zero division") return self.val desc = """ Counts the number of machine clears relative to the number of nuke slots due to SMC.""" class Memory_Ordering: name = "Memory_Ordering" domain = "Count" area = "BAD" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Nuke.compute(EV) * (EV("MACHINE_CLEARS.MEMORY_ORDERING", 4) / EV("MACHINE_CLEARS.SLOW", 4)) self.thresh = (self.val > 0.02) except ZeroDivisionError: handle_error(self, "Memory_Ordering zero division") return self.val desc = """ Counts the number of machine clears relative to the number of nuke slots due to memory ordering.""" class FP_Assist: name = "FP_Assist" domain = "Count" area = "BAD" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Nuke.compute(EV) * (EV("MACHINE_CLEARS.FP_ASSIST", 4) / EV("MACHINE_CLEARS.SLOW", 4)) self.thresh = (self.val > 0.02) except ZeroDivisionError: handle_error(self, "FP_Assist zero division") return self.val desc = """ Counts the number of machine clears relative to the number of nuke slots due to FP assists.""" class Disambiguation: name = "Disambiguation" domain = "Count" area = "BAD" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Nuke.compute(EV) * (EV("MACHINE_CLEARS.DISAMBIGUATION", 4) / EV("MACHINE_CLEARS.SLOW", 4)) 
self.thresh = (self.val > 0.02) except ZeroDivisionError: handle_error(self, "Disambiguation zero division") return self.val desc = """ Counts the number of machine clears relative to the number of nuke slots due to memory disambiguation.""" class Page_Fault: name = "Page_Fault" domain = "Count" area = "BAD" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Nuke.compute(EV) * (EV("MACHINE_CLEARS.PAGE_FAULT", 4) / EV("MACHINE_CLEARS.SLOW", 4)) self.thresh = (self.val > 0.02) except ZeroDivisionError: handle_error(self, "Page_Fault zero division") return self.val desc = """ Counts the number of machine clears relative to the number of nuke slots due to page faults.""" class Fast_Nuke: name = "Fast_Nuke" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.FASTNUKE", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Fast_Nuke zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to a machine clear classified as a fast nuke due to memory ordering, memory disambiguation and memory renaming.""" class Backend_Bound: name = "Backend_Bound" domain = "Slots" area = "BE" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.ALL", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Backend_Bound zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. 
If a uop is not available (IQ is empty), this event will not count. The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound. The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.""" class Core_Bound: name = "Core_Bound" domain = "Cycles" area = "BE" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV)) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Core_Bound zero division") return self.val desc = """ Counts the number of cycles due to backend bound stalls that are core execution bound and not attributed to outstanding demand load or store stalls.""" class Memory_Bound: name = "Memory_Bound" domain = "Cycles" area = "BE" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = min((EV("TOPDOWN_BE_BOUND.ALL", 2) / SLOTS(self, EV, 2)) , (EV("LD_HEAD.ANY_AT_RET", 2) / CLKS(self, EV, 2)) + self.Store_Bound.compute(EV)) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Memory_Bound zero division") return self.val desc = """ Counts the number of cycles the core is stalled due to stores or loads.""" class Store_Bound: name = "Store_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Mem_Scheduler.compute(EV) * (EV("MEM_SCHEDULER_BLOCK.ST_BUF", 3) / EV("MEM_SCHEDULER_BLOCK.ALL", 3)) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Store_Bound zero division") return self.val desc = """ Counts the number of cycles the core is stalled due to 
store buffer full.""" class L1_Bound: name = "L1_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_HEAD.L1_BOUND_AT_RET", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "L1_Bound zero division") return self.val desc = """ Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a load block.""" class Store_Fwd_Blk: name = "Store_Fwd_Blk" domain = "Cycles" area = "BE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_HEAD.ST_ADDR_AT_RET", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Store_Fwd_Blk zero division") return self.val desc = """ Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.""" class STLB_Hit: name = "STLB_Hit" domain = "Cycles" area = "BE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_HEAD.DTLB_MISS_AT_RET", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "STLB_Hit zero division") return self.val desc = """ Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a first level TLB miss.""" class STLB_Miss: name = "STLB_Miss" domain = "Cycles" area = "BE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_HEAD.PGWALK_AT_RET", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "STLB_Miss zero division") return self.val desc = """ Counts the number of 
cycles that the oldest load of the load buffer is stalled at retirement due to a second level TLB miss requiring a page walk.""" class Other_L1: name = "Other_L1" domain = "Cycles" area = "BE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_HEAD.OTHER_AT_RET", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Other_L1 zero division") return self.val desc = """ Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a number of other load blocks.""" class L2_Bound: name = "L2_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("MEM_BOUND_STALLS.LOAD_L2_HIT", 3) / CLKS(self, EV, 3)) - (MEM_BOUND_STALLS_AT_RET_CORRECTION(self, EV, 3) * EV("MEM_BOUND_STALLS.LOAD_L2_HIT", 3) / EV("MEM_BOUND_STALLS.LOAD", 3)) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "L2_Bound zero division") return self.val desc = """ Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.""" class L3_Bound: name = "L3_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("MEM_BOUND_STALLS.LOAD_LLC_HIT", 3) / CLKS(self, EV, 3)) - (MEM_BOUND_STALLS_AT_RET_CORRECTION(self, EV, 3) * EV("MEM_BOUND_STALLS.LOAD_LLC_HIT", 3) / EV("MEM_BOUND_STALLS.LOAD", 3)) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "L3_Bound zero division") return self.val desc = """ Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.""" class DRAM_Bound: name = "DRAM_Bound" domain = "Cycles" area 
= "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("MEM_BOUND_STALLS.LOAD_DRAM_HIT", 3) / CLKS(self, EV, 3)) - (MEM_BOUND_STALLS_AT_RET_CORRECTION(self, EV, 3) * EV("MEM_BOUND_STALLS.LOAD_DRAM_HIT", 3) / EV("MEM_BOUND_STALLS.LOAD", 3)) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "DRAM_Bound zero division") return self.val desc = """ Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).""" class Other_Load_Store: name = "Other_Load_Store" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Memory_Bound.compute(EV) - (self.Store_Bound.compute(EV) + self.L1_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Other_Load_Store zero division") return self.val desc = """ Counts the number of cycles the core is stalled due to a demand load miss which hits in the L2, LLC, DRAM or MMIO (Non-DRAM) but could not be correctly attributed or cycles in which the load miss is waiting on a request buffer.""" class Backend_Bound_Aux: name = "Backend_Bound_Aux" domain = "Slots" area = "BE_aux" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Backend_Bound_Aux zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that UOPS must be available for consumption in order for this event to count. 
If a uop is not available (IQ is empty), this event will not count. All of these subevents count backend stalls, in slots, due to a resource limitation. These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based. These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.""" class Resource_Bound: name = "Resource_Bound" domain = "Slots" area = "BE_aux" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Resource_Bound zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. 
If a uop is not available (IQ is empty), this event will not count.""" class Mem_Scheduler: name = "Mem_Scheduler" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.MEM_SCHEDULER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Mem_Scheduler zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.""" class ST_Buffer: name = "ST_Buffer" domain = "Count" area = "BE_aux" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Mem_Scheduler.compute(EV) * (EV("MEM_SCHEDULER_BLOCK.ST_BUF", 4) / EV("MEM_SCHEDULER_BLOCK.ALL", 4)) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "ST_Buffer zero division") return self.val desc = """ Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to store buffer full""" class LD_Buffer: name = "LD_Buffer" domain = "Count" area = "BE_aux" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Mem_Scheduler.compute(EV) * EV("MEM_SCHEDULER_BLOCK.LD_BUF", 4) / EV("MEM_SCHEDULER_BLOCK.ALL", 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "LD_Buffer zero division") return self.val desc = """ Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to load buffer full""" class RSV: name = "RSV" domain = "Count" area = "BE_aux" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: 
self.val = self.Mem_Scheduler.compute(EV) * EV("MEM_SCHEDULER_BLOCK.RSV", 4) / EV("MEM_SCHEDULER_BLOCK.ALL", 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "RSV zero division") return self.val desc = """ Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to RSV full relative""" class Non_Mem_Scheduler: name = "Non_Mem_Scheduler" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Non_Mem_Scheduler zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.""" class Register: name = "Register" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.REGISTER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Register zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).""" class Reorder_Buffer: name = "Reorder_Buffer" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.REORDER_BUFFER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Reorder_Buffer zero division") return self.val 
desc = """ Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls).""" class Alloc_Restriction: name = "Alloc_Restriction" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Alloc_Restriction zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions.""" class Serialization: name = "Serialization" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.SERIALIZATION", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Serialization zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).""" class Retiring: name = "Retiring" domain = "Slots" area = "RET" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_RETIRING.ALL", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.75) except ZeroDivisionError: handle_error(self, "Retiring zero division") return self.val desc = """ Counts the number of issue slots that result in retirement slots.""" class Base: name = "Base" domain = "Slots" area = "RET" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("TOPDOWN_RETIRING.ALL", 2) - EV("UOPS_RETIRED.MS", 
2)) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.60) except ZeroDivisionError: handle_error(self, "Base zero division") return self.val desc = """ Counts the number of uops that are not from the microsequencer.""" class FPDIV_uops: name = "FPDIV_uops" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_RETIRED.FPDIV", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "FPDIV_uops zero division") return self.val desc = """ Counts the number of floating point divide operations per uop.""" class Other_Ret: name = "Other_Ret" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("TOPDOWN_RETIRING.ALL", 3) - EV("UOPS_RETIRED.MS", 3) - EV("UOPS_RETIRED.FPDIV", 3)) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.30) except ZeroDivisionError: handle_error(self, "Other_Ret zero division") return self.val desc = """ Counts the number of uops retired excluding ms and fp div uops.""" class MS_uops: name = "MS_uops" domain = "Slots" area = "RET" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_RETIRED.MS", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "MS_uops zero division") return self.val desc = """ Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). 
This includes uops from flows due to complex instructions, faults, assists, and inserted flows.""" class Metric_CLKS: name = "CLKS" domain = "Cycles" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS zero division") desc = """ """ class Metric_CLKS_P: name = "CLKS_P" domain = "Cycles" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CLKS_P(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS_P zero division") desc = """ """ class Metric_SLOTS: name = "SLOTS" domain = "Cycles" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = SLOTS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SLOTS zero division") desc = """ """ class Metric_IPC: name = "IPC" domain = "" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IPC zero division") desc = """ Instructions Per Cycle""" class Metric_CPI: name = "CPI" domain = "" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPI zero division") desc = """ Cycles Per Instruction""" class Metric_UPI: name = "UPI" domain = "" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = UPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "UPI zero division") desc = """ Uops Per Instruction""" class Metric_Store_Fwd_Blocks: name = 
"Store_Fwd_Blocks" domain = "" maxval = 0 errcount = 0 area = "Info.L1_Bound" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Store_Fwd_Blocks(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Store_Fwd_Blocks zero division") desc = """ Percentage of total non-speculative loads with a store forward or unknown store address block""" class Metric_Address_Alias_Blocks: name = "Address_Alias_Blocks" domain = "" maxval = 0 errcount = 0 area = "Info.L1_Bound" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Address_Alias_Blocks(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Address_Alias_Blocks zero division") desc = """ Percentage of total non-speculative loads with a address aliasing block""" class Metric_Load_Splits: name = "Load_Splits" domain = "" maxval = 0 errcount = 0 area = "Info.L1_Bound" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Load_Splits(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Splits zero division") desc = """ Percentage of total non-speculative loads that are splits""" class Metric_IpBranch: name = "IpBranch" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpBranch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpBranch zero division") desc = """ Instructions per Branch (lower number means higher occurrence rate)""" class Metric_IpCall: name = "IpCall" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpCall(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpCall zero division") desc = """ Instruction per (near) call (lower number means higher occurrence rate)""" class 
Metric_IpLoad: name = "IpLoad" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpLoad(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpLoad zero division") desc = """ Instructions per Load""" class Metric_IpStore: name = "IpStore" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpStore(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpStore zero division") desc = """ Instructions per Store""" class Metric_IpMispredict: name = "IpMispredict" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Instructions per retired Branch Misprediction""" class Metric_IpMisp_Cond_Ntaken: name = "IpMisp_Cond_Ntaken" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMisp_Cond_Ntaken(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMisp_Cond_Ntaken zero division") desc = """ Instructions per retired conditional Branch Misprediction where the branch was not taken""" class Metric_IpMisp_Cond_Taken: name = "IpMisp_Cond_Taken" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMisp_Cond_Taken(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMisp_Cond_Taken zero division") desc = """ Instructions per retired conditional Branch Misprediction where the branch was taken""" class Metric_IpMisp_Ret: name = "IpMisp_Ret" domain = "" maxval = 0 errcount = 0 area 
= "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMisp_Ret(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMisp_Ret zero division") desc = """ Instructions per retired return Branch Misprediction""" class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc = """ Instructions per retired indirect call or jump Branch Misprediction""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch""" class Metric_Branch_Mispredict_Ratio: name = "Branch_Mispredict_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Branch_Mispredict_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Mispredict_Ratio zero division") desc = """ Ratio of all branches which mispredict""" class Metric_Branch_Mispredict_to_Unknown_Branch_Ratio: name = "Branch_Mispredict_to_Unknown_Branch_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Branch_Mispredict_to_Unknown_Branch_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Mispredict_to_Unknown_Branch_Ratio zero division") desc = """ Ratio between Mispredicted branches and unknown branches""" class 
Metric_Microcode_Uop_Ratio: name = "Microcode_Uop_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Microcode_Uop_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Microcode_Uop_Ratio zero division") desc = """ Percentage of all uops which are ucode ops""" class Metric_FPDiv_Uop_Ratio: name = "FPDiv_Uop_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = FPDiv_Uop_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FPDiv_Uop_Ratio zero division") desc = """ Percentage of all uops which are FPDiv uops""" class Metric_IDiv_Uop_Ratio: name = "IDiv_Uop_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IDiv_Uop_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IDiv_Uop_Ratio zero division") desc = """ Percentage of all uops which are IDiv uops""" class Metric_X87_Uop_Ratio: name = "X87_Uop_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = X87_Uop_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "X87_Uop_Ratio zero division") desc = """ Percentage of all uops which are x87 uops""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_Kernel_Utilization: name = 
"Kernel_Utilization" domain = "" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in Kernel mode""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization""" class Metric_Cycles_per_Demand_Load_L2_Hit: name = "Cycles_per_Demand_Load_L2_Hit" domain = "" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Cycles_per_Demand_Load_L2_Hit(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cycles_per_Demand_Load_L2_Hit zero division") desc = """ Cycle cost per L2 hit""" class Metric_Cycles_per_Demand_Load_L3_Hit: name = "Cycles_per_Demand_Load_L3_Hit" domain = "" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Cycles_per_Demand_Load_L3_Hit(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cycles_per_Demand_Load_L3_Hit zero division") desc = """ Cycle cost per LLC hit""" class Metric_Cycles_per_Demand_Load_DRAM_Hit: name = "Cycles_per_Demand_Load_DRAM_Hit" domain = "" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Cycles_per_Demand_Load_DRAM_Hit(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cycles_per_Demand_Load_DRAM_Hit zero division") desc = """ Cycle cost per DRAM hit""" class 
Metric_Inst_Miss_Cost_L2Hit_Percent: name = "Inst_Miss_Cost_L2Hit_Percent" domain = "" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Inst_Miss_Cost_L2Hit_Percent(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Inst_Miss_Cost_L2Hit_Percent zero division") desc = """ Percent of instruction miss cost that hit in the L2""" class Metric_Inst_Miss_Cost_L3Hit_Percent: name = "Inst_Miss_Cost_L3Hit_Percent" domain = "" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Inst_Miss_Cost_L3Hit_Percent(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Inst_Miss_Cost_L3Hit_Percent zero division") desc = """ Percent of instruction miss cost that hit in the L3""" class Metric_Inst_Miss_Cost_DRAMHit_Percent: name = "Inst_Miss_Cost_DRAMHit_Percent" domain = "" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Inst_Miss_Cost_DRAMHit_Percent(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Inst_Miss_Cost_DRAMHit_Percent zero division") desc = """ Percent of instruction miss cost that hit in DRAM""" class Metric_MemLoadPKI: name = "MemLoadPKI" domain = "" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = MemLoadPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MemLoadPKI zero division") desc = """ load ops retired per 1000 instruction""" # Schedule class Setup: def __init__(self, r): o = dict() n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = Branch_Detect() 
; r.run(n) ; o["Branch_Detect"] = n n = Branch_Resteer() ; r.run(n) ; o["Branch_Resteer"] = n n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n n = Cisc() ; r.run(n) ; o["Cisc"] = n n = Decode() ; r.run(n) ; o["Decode"] = n n = Predecode() ; r.run(n) ; o["Predecode"] = n n = Other_FB() ; r.run(n) ; o["Other_FB"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Nuke() ; r.run(n) ; o["Nuke"] = n n = SMC() ; r.run(n) ; o["SMC"] = n n = Memory_Ordering() ; r.run(n) ; o["Memory_Ordering"] = n n = FP_Assist() ; r.run(n) ; o["FP_Assist"] = n n = Disambiguation() ; r.run(n) ; o["Disambiguation"] = n n = Page_Fault() ; r.run(n) ; o["Page_Fault"] = n n = Fast_Nuke() ; r.run(n) ; o["Fast_Nuke"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = STLB_Hit() ; r.run(n) ; o["STLB_Hit"] = n n = STLB_Miss() ; r.run(n) ; o["STLB_Miss"] = n n = Other_L1() ; r.run(n) ; o["Other_L1"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = Other_Load_Store() ; r.run(n) ; o["Other_Load_Store"] = n if use_aux: n = Backend_Bound_Aux() ; r.run(n) ; o["Backend_Bound_Aux"] = n if use_aux: n = Resource_Bound() ; r.run(n) ; o["Resource_Bound"] = n n = Mem_Scheduler() ; r.run(n) ; o["Mem_Scheduler"] = n if use_aux: n = ST_Buffer() ; r.run(n) ; o["ST_Buffer"] = n if use_aux: n = LD_Buffer() ; r.run(n) ; o["LD_Buffer"] = n if use_aux: n = RSV() ; r.run(n) ; o["RSV"] = n if use_aux: n = Non_Mem_Scheduler() ; r.run(n) ; o["Non_Mem_Scheduler"] = n if use_aux: n = Register() ; r.run(n) ; 
o["Register"] = n if use_aux: n = Reorder_Buffer() ; r.run(n) ; o["Reorder_Buffer"] = n if use_aux: n = Alloc_Restriction() ; r.run(n) ; o["Alloc_Restriction"] = n if use_aux: n = Serialization() ; r.run(n) ; o["Serialization"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = Base() ; r.run(n) ; o["Base"] = n n = FPDIV_uops() ; r.run(n) ; o["FPDIV_uops"] = n n = Other_Ret() ; r.run(n) ; o["Other_Ret"] = n n = MS_uops() ; r.run(n) ; o["MS_uops"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Detect"].parent = o["Fetch_Latency"] o["Branch_Resteer"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["Cisc"].parent = o["Fetch_Bandwidth"] o["Decode"].parent = o["Fetch_Bandwidth"] o["Predecode"].parent = o["Fetch_Bandwidth"] o["Other_FB"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Nuke"].parent = o["Machine_Clears"] o["SMC"].parent = o["Nuke"] o["Memory_Ordering"].parent = o["Nuke"] o["FP_Assist"].parent = o["Nuke"] o["Disambiguation"].parent = o["Nuke"] o["Page_Fault"].parent = o["Nuke"] o["Fast_Nuke"].parent = o["Machine_Clears"] o["Core_Bound"].parent = o["Backend_Bound"] o["Memory_Bound"].parent = o["Backend_Bound"] o["Store_Bound"].parent = o["Memory_Bound"] o["L1_Bound"].parent = o["Memory_Bound"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["STLB_Hit"].parent = o["L1_Bound"] o["STLB_Miss"].parent = o["L1_Bound"] o["Other_L1"].parent = o["L1_Bound"] o["L2_Bound"].parent = o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["DRAM_Bound"].parent = o["Memory_Bound"] o["Other_Load_Store"].parent = o["Memory_Bound"] if use_aux: o["Resource_Bound"].parent = o["Backend_Bound_Aux"] if use_aux: o["Mem_Scheduler"].parent = o["Resource_Bound"] if use_aux: o["ST_Buffer"].parent = o["Mem_Scheduler"] if use_aux: 
o["LD_Buffer"].parent = o["Mem_Scheduler"] if use_aux: o["RSV"].parent = o["Mem_Scheduler"] if use_aux: o["Non_Mem_Scheduler"].parent = o["Resource_Bound"] if use_aux: o["Register"].parent = o["Resource_Bound"] if use_aux: o["Reorder_Buffer"].parent = o["Resource_Bound"] if use_aux: o["Alloc_Restriction"].parent = o["Resource_Bound"] if use_aux: o["Serialization"].parent = o["Resource_Bound"] o["Base"].parent = o["Retiring"] o["FPDIV_uops"].parent = o["Base"] o["Other_Ret"].parent = o["Base"] o["MS_uops"].parent = o["Retiring"] # user visible metrics n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_CLKS_P() ; r.metric(n) ; o["CLKS_P"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_UPI() ; r.metric(n) ; o["UPI"] = n n = Metric_Store_Fwd_Blocks() ; r.metric(n) ; o["Store_Fwd_Blocks"] = n n = Metric_Address_Alias_Blocks() ; r.metric(n) ; o["Address_Alias_Blocks"] = n n = Metric_Load_Splits() ; r.metric(n) ; o["Load_Splits"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Cond_Ntaken() ; r.metric(n) ; o["IpMisp_Cond_Ntaken"] = n n = Metric_IpMisp_Cond_Taken() ; r.metric(n) ; o["IpMisp_Cond_Taken"] = n n = Metric_IpMisp_Ret() ; r.metric(n) ; o["IpMisp_Ret"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n n = Metric_Branch_Mispredict_Ratio() ; r.metric(n) ; o["Branch_Mispredict_Ratio"] = n n = Metric_Branch_Mispredict_to_Unknown_Branch_Ratio() ; r.metric(n) ; o["Branch_Mispredict_to_Unknown_Branch_Ratio"] = n n = Metric_Microcode_Uop_Ratio() ; r.metric(n) ; o["Microcode_Uop_Ratio"] = n n = Metric_FPDiv_Uop_Ratio() ; 
r.metric(n) ; o["FPDiv_Uop_Ratio"] = n n = Metric_IDiv_Uop_Ratio() ; r.metric(n) ; o["IDiv_Uop_Ratio"] = n n = Metric_X87_Uop_Ratio() ; r.metric(n) ; o["X87_Uop_Ratio"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_Cycles_per_Demand_Load_L2_Hit() ; r.metric(n) ; o["Cycles_per_Demand_Load_L2_Hit"] = n n = Metric_Cycles_per_Demand_Load_L3_Hit() ; r.metric(n) ; o["Cycles_per_Demand_Load_L3_Hit"] = n n = Metric_Cycles_per_Demand_Load_DRAM_Hit() ; r.metric(n) ; o["Cycles_per_Demand_Load_DRAM_Hit"] = n n = Metric_Inst_Miss_Cost_L2Hit_Percent() ; r.metric(n) ; o["Inst_Miss_Cost_L2Hit_Percent"] = n n = Metric_Inst_Miss_Cost_L3Hit_Percent() ; r.metric(n) ; o["Inst_Miss_Cost_L3Hit_Percent"] = n n = Metric_Inst_Miss_Cost_DRAMHit_Percent() ; r.metric(n) ; o["Inst_Miss_Cost_DRAMHit_Percent"] = n n = Metric_MemLoadPKI() ; r.metric(n) ; o["MemLoadPKI"] = n # references between groups o["SMC"].Nuke = o["Nuke"] o["Memory_Ordering"].Nuke = o["Nuke"] o["FP_Assist"].Nuke = o["Nuke"] o["Disambiguation"].Nuke = o["Nuke"] o["Page_Fault"].Nuke = o["Nuke"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Mem_Scheduler = o["Mem_Scheduler"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Core_Bound"].Store_Bound = o["Store_Bound"] o["Memory_Bound"].Mem_Scheduler = o["Mem_Scheduler"] o["Memory_Bound"].Store_Bound = o["Store_Bound"] o["Store_Bound"].Mem_Scheduler = o["Mem_Scheduler"] o["Other_Load_Store"].Mem_Scheduler = o["Mem_Scheduler"] o["Other_Load_Store"].L1_Bound = o["L1_Bound"] o["Other_Load_Store"].L2_Bound = o["L2_Bound"] o["Other_Load_Store"].Memory_Bound = o["Memory_Bound"] o["Other_Load_Store"].Store_Bound = o["Store_Bound"] o["Other_Load_Store"].L3_Bound = o["L3_Bound"] o["Other_Load_Store"].DRAM_Bound = o["DRAM_Bound"] if use_aux: o["Backend_Bound_Aux"].Backend_Bound = 
o["Backend_Bound"] if use_aux: o["Resource_Bound"].Backend_Bound = o["Backend_Bound"] if use_aux: o["ST_Buffer"].Mem_Scheduler = o["Mem_Scheduler"] if use_aux: o["LD_Buffer"].Mem_Scheduler = o["Mem_Scheduler"] if use_aux: o["RSV"].Mem_Scheduler = o["Mem_Scheduler"] # siblings cross-tree
68,772
Python
.py
2,004
28.078842
211
0.618823
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,905
skx_server_ratios.py
andikleen_pmu-tools/skx_server_ratios.py
# -*- coding: latin-1 -*- # # auto generated TopDown/TMA 4.8-full-perf description for Intel Xeon Scalable Processors (code named Skylake Server) # Please see http://ark.intel.com for more details on these CPUs. # # References: # http://bit.ly/tma-ispass14 # http://halobates.de/blog/p/262 # https://sites.google.com/site/analysismethods/yasin-pubs # https://download.01.org/perfmon/ # https://github.com/andikleen/pmu-tools/wiki/toplev-manual # # Helpers print_error = lambda msg: False smt_enabled = False ebs_mode = False version = "4.8-full-perf" base_frequency = -1.0 Memory = 0 Average_Frequency = 0.0 num_cores = 1 num_threads = 1 num_sockets = 1 def handle_error(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 obj.thresh = False def handle_error_metric(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 # Constants Exe_Ports = 8 Mem_L2_Store_Cost = 11 Mem_STLB_Hit_Cost = 9 BAClear_Cost = 9 MS_Switches_Cost = 2 Avg_Assist_Cost = 34 Pipeline_Width = 4 OneMillion = 1000000 OneBillion = 1000000000 Energy_Unit = 61 Errata_Whitelist = "SKL091" EBS_Mode = 0 DS = 1 # Aux. 
formulas def Backend_Bound_Cycles(self, EV, level): return EV("CYCLE_ACTIVITY.STALLS_TOTAL", level) + Few_Uops_Executed_Threshold(self, EV, level) + EV("EXE_ACTIVITY.BOUND_ON_STORES", level) def Br_DoI_Jumps(self, EV, level): return EV("BR_INST_RETIRED.NEAR_TAKEN", level) - (EV("BR_INST_RETIRED.COND", level) - EV("BR_INST_RETIRED.NOT_TAKEN", level)) - 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) def Branching_Retired(self, EV, level): return (EV("BR_INST_RETIRED.ALL_BRANCHES", level) + 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("INST_RETIRED.NOP", level)) / SLOTS(self, EV, level) def Serialize_Core(self, EV, level): return self.Core_Bound.compute(EV) * (self.Serializing_Operation.compute(EV) + self.Core_Bound.compute(EV) * EV("RS_EVENTS.EMPTY_CYCLES", level) / CLKS(self, EV, level) * self.Ports_Utilized_0.compute(EV)) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV)) def Umisp(self, EV, level): return 10 * self.Microcode_Sequencer.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV) def Assist(self, EV, level): return (self.Microcode_Sequencer.compute(EV) / (self.Microcode_Sequencer.compute(EV) + self.Few_Uops_Instructions.compute(EV))) * (self.Assists.compute(EV) / self.Microcode_Sequencer.compute(EV)) def Assist_Frontend(self, EV, level): return Assist(self, EV, level) * self.Fetch_Latency.compute(EV) * (self.MS_Switches.compute(EV) + self.Branch_Resteers.compute(EV) * (self.Clears_Resteers.compute(EV) + self.Mispredicts_Resteers.compute(EV) * Umisp(self, EV, level)) / (self.Clears_Resteers.compute(EV) + self.Unknown_Branches.compute(EV) + self.Mispredicts_Resteers.compute(EV))) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) def Assist_Retired(self, EV, level): return Assist(self, EV, level) * self.Heavy_Operations.compute(EV) def 
Core_Bound_Cycles(self, EV, level): return self.Ports_Utilized_0.compute(EV) * CLKS(self, EV, level) + Few_Uops_Executed_Threshold(self, EV, level) def Cycles_1_Port_Utilized(self, EV, level): return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) - EV("UOPS_EXECUTED.CORE_CYCLES_GE_2", level)) / 2 if smt_enabled else EV("EXE_ACTIVITY.1_PORTS_UTIL", level) def Cycles_2_Ports_Utilized(self, EV, level): return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_2", level) - EV("UOPS_EXECUTED.CORE_CYCLES_GE_3", level)) / 2 if smt_enabled else EV("EXE_ACTIVITY.2_PORTS_UTIL", level) def Cycles_3m_Ports_Utilized(self, EV, level): return EV("UOPS_EXECUTED.CORE_CYCLES_GE_3", level) / 2 if smt_enabled else EV("UOPS_EXECUTED.CORE_CYCLES_GE_3", level) def DurationTimeInSeconds(self, EV, level): return EV("interval-ms", 0) / 1000 def Execute_Cycles(self, EV, level): return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.THREAD:c1", level) # factor used for metrics associating fixed costs for FB Hits - according to probability theory if all FB Hits come at a random rate in original L1_Miss cost interval then the average cost for each one is 0.5 of the fixed cost def FB_Factor(self, EV, level): return 1 + FBHit_per_L1Miss(self, EV, level) / 2 def FBHit_per_L1Miss(self, EV, level): return EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("MEM_LOAD_RETIRED.L1_MISS", level) def Fetched_Uops(self, EV, level): return EV("IDQ.DSB_UOPS", level) + EV("IDQ.MITE_UOPS", level) + EV("IDQ.MS_UOPS", level) def Few_Uops_Executed_Threshold(self, EV, level): return EV("EXE_ACTIVITY.1_PORTS_UTIL", level) + self.Retiring.compute(EV) * EV("EXE_ACTIVITY.2_PORTS_UTIL", level) # Floating Point computational (arithmetic) Operations Count def FLOP_Count(self, EV, level): return EV("FP_ARITH_INST_RETIRED.SCALAR", level) + 2 * EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + 4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) + 8 * EV("FP_ARITH_INST_RETIRED.8_FLOPS", level) + 16 * 
EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level) # Floating Point computational (arithmetic) Operations Count def FP_Arith_Scalar(self, EV, level): return EV("FP_ARITH_INST_RETIRED.SCALAR", level) # Floating Point computational (arithmetic) Operations Count def FP_Arith_Vector(self, EV, level): return EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE:u0xfc", level) def HighIPC(self, EV, level): val = IPC(self, EV, level) / Pipeline_Width return val def L2_Bound_Ratio(self, EV, level): return (EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", level) - EV("CYCLE_ACTIVITY.STALLS_L2_MISS", level)) / CLKS(self, EV, level) def Light_Ops_Sum(self, EV, level): return self.FP_Arith.compute(EV) + self.Memory_Operations.compute(EV) + self.Fused_Instructions.compute(EV) + self.Non_Fused_Branches.compute(EV) def LOAD_L2_HIT(self, EV, level): return EV("MEM_LOAD_RETIRED.L2_HIT", level) * (1 + FBHit_per_L1Miss(self, EV, level)) def LOAD_L3_HIT(self, EV, level): return EV("MEM_LOAD_RETIRED.L3_HIT", level) * FB_Factor(self, EV, level) def LOAD_XSNP_HIT(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT", level) + EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM", level) * (1 - True_XSNP_HitM_Fraction(self, EV, level)) def LOAD_XSNP_HITM(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM", level) * True_XSNP_HitM_Fraction(self, EV, level) def LOAD_XSNP_MISS(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", level) def MEM_Bound_Ratio(self, EV, level): return EV("CYCLE_ACTIVITY.STALLS_L3_MISS", level) / CLKS(self, EV, level) + L2_Bound_Ratio(self, EV, level) - self.L2_Bound.compute(EV) def Mem_Lock_St_Fraction(self, EV, level): return EV("MEM_INST_RETIRED.LOCK_LOADS", level) / EV("MEM_INST_RETIRED.ALL_STORES", level) def Memory_Bound_Fraction(self, EV, level): return (EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", level) + EV("EXE_ACTIVITY.BOUND_ON_STORES", level)) / Backend_Bound_Cycles(self, EV, level) def Mispred_Clears_Fraction(self, EV, level): return 
EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level)) def OCR_all_rfo_l3_hit_snoop_hitm(self, EV, level): return EV("OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE", level) + EV("OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE", level) def OCR_all_rfo_l3_miss_remote_hitm(self, EV, level): return EV("OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM", level) + EV("OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM", level) def ORO_Demand_RFO_C1(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level ) def ORO_DRD_Any_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level ) def ORO_DRD_BW_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c4", level)) , level ) def SQ_Full_Cycles(self, EV, level): return (EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) / 2) if smt_enabled else EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) def Store_L2_Hit_Cycles(self, EV, level): return EV("L2_RQSTS.RFO_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level)) def True_XSNP_HitM_Fraction(self, EV, level): return EV("OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE", level) / (EV("OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE", level) + EV("OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", level)) def Mem_XSNP_HitM_Cost(self, EV, level): return 47.5 * Core_Frequency(self, EV, level) def Mem_XSNP_Hit_Cost(self, EV, level): return 47.5 * Core_Frequency(self, EV, level) def Mem_XSNP_None_Cost(self, EV, level): return 20.5 * Core_Frequency(self, EV, level) def Mem_Local_DRAM_Cost(self, EV, level): return 80 * Core_Frequency(self, EV, level) def Mem_Remote_DRAM_Cost(self, 
EV, level): return 147.5 * Core_Frequency(self, EV, level) def Mem_Remote_HitM_Cost(self, EV, level): return 110 * Core_Frequency(self, EV, level) def Mem_Remote_Fwd_Cost(self, EV, level): return 110 * Core_Frequency(self, EV, level) def Mem_L2_Hit_Cost(self, EV, level): return 3.5 * Core_Frequency(self, EV, level) def Recovery_Cycles(self, EV, level): return (EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2) if smt_enabled else EV("INT_MISC.RECOVERY_CYCLES", level) def Retire_Fraction(self, EV, level): return Retired_Slots(self, EV, level) / EV("UOPS_ISSUED.ANY", level) # Retired slots per Logical Processor def Retired_Slots(self, EV, level): return EV("UOPS_RETIRED.RETIRE_SLOTS", level) # Number of logical processors (enabled or online) on the target system def Num_CPUs(self, EV, level): return 8 if smt_enabled else 4 # A system parameter for dependent-loads (pointer chasing like access pattern) of the workload. An integer fraction in range from 0 (no dependent loads) to 100 (all loads are dependent loads) def Dependent_Loads_Weight(self, EV, level): return 20 # Total pipeline cost of Branch Misprediction related bottlenecks def Mispredictions(self, EV, level): val = 100 *(1 - Umisp(self, EV, level)) * (self.Branch_Mispredicts.compute(EV) + self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))) self.thresh = (val > 20) return val # Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses) def Big_Code(self, EV, level): val = 100 * self.Fetch_Latency.compute(EV) * (self.ITLB_Misses.compute(EV) + self.ICache_Misses.compute(EV) + self.Unknown_Branches.compute(EV)) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + 
self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) self.thresh = (val > 20) return val # Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end) def Instruction_Fetch_BW(self, EV, level): val = 100 *(self.Frontend_Bound.compute(EV) - (1 - Umisp(self, EV, level)) * self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) - Assist_Frontend(self, EV, level)) - Big_Code(self, EV, level) self.thresh = (val > 20) return val # Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks def Cache_Memory_Bandwidth(self, EV, level): val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Bandwidth.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.SQ_Full.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.FB_Full.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + 
self.FB_Full.compute(EV))))) self.thresh = (val > 20) return val # Total pipeline cost of external Memory- or Cache-Latency related bottlenecks def Cache_Memory_Latency(self, EV, level): val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Latency.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.L3_Hit_Latency.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * self.L2_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Store_Latency.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.L1_Hit_Latency.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV))))) self.thresh = (val > 20) return val # Total pipeline 
cost of Memory Address Translation related bottlenecks (data-side TLBs) def Memory_Data_TLBs(self, EV, level): val = 100 *(self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / max(self.Memory_Bound.compute(EV) , (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV)))) * (self.DTLB_Load.compute(EV) / max(self.L1_Bound.compute(EV) , (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.DTLB_Store.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV))))) self.thresh = (val > 20) return val # Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors) def Memory_Synchronization(self, EV, level): val = 100 *(self.Memory_Bound.compute(EV) * ((self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Latency.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV))) * self.Remote_Cache.compute(EV) / (self.Remote_Cache.compute(EV) + self.Remote_MEM.compute(EV) + self.Local_MEM.compute(EV)) + (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Contested_Accesses.compute(EV) + self.Data_Sharing.compute(EV)) / (self.L3_Hit_Latency.compute(EV) + 
self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)) + (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * self.False_Sharing.compute(EV) / ((self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)) - self.Store_Latency.compute(EV))) + self.Machine_Clears.compute(EV) * (1 - self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV)))) if DS else 100 *(self.Memory_Bound.compute(EV) * ((self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Contested_Accesses.compute(EV) + self.Data_Sharing.compute(EV)) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)) + (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * self.False_Sharing.compute(EV) / ((self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)) - self.Store_Latency.compute(EV))) + self.Machine_Clears.compute(EV) * (1 - self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV)))) self.thresh = (val > 10) return val # Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. 
def Compute_Bound_Est(self, EV, level): val = 100 *((self.Core_Bound.compute(EV) * self.Divider.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) + (self.Core_Bound.compute(EV) * (self.Ports_Utilization.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) * (self.Ports_Utilized_3m.compute(EV) / (self.Ports_Utilized_0.compute(EV) + self.Ports_Utilized_1.compute(EV) + self.Ports_Utilized_2.compute(EV) + self.Ports_Utilized_3m.compute(EV))))) self.thresh = (val > 20) return val # Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments) def Irregular_Overhead(self, EV, level): val = 100 *(Assist_Frontend(self, EV, level) + Umisp(self, EV, level) * self.Branch_Mispredicts.compute(EV) + (self.Machine_Clears.compute(EV) * self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))) + Serialize_Core(self, EV, level) + Assist_Retired(self, EV, level)) self.thresh = (val > 10) return val # Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls. def Other_Bottlenecks(self, EV, level): val = 100 -(Big_Code(self, EV, level) + Instruction_Fetch_BW(self, EV, level) + Mispredictions(self, EV, level) + Cache_Memory_Bandwidth(self, EV, level) + Cache_Memory_Latency(self, EV, level) + Memory_Data_TLBs(self, EV, level) + Memory_Synchronization(self, EV, level) + Compute_Bound_Est(self, EV, level) + Irregular_Overhead(self, EV, level) + Branching_Overhead(self, EV, level) + Useful_Work(self, EV, level)) self.thresh = (val > 20) return val # Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound). 
Consider Loop Unrolling or function inlining optimizations def Branching_Overhead(self, EV, level): val = 100 * Branching_Retired(self, EV, level) self.thresh = (val > 5) return val # Total pipeline cost of "useful operations" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead. def Useful_Work(self, EV, level): val = 100 *(self.Retiring.compute(EV) - Branching_Retired(self, EV, level) - Assist_Retired(self, EV, level)) self.thresh = (val > 20) return val # Probability of Core Bound bottleneck hidden by SMT-profiling artifacts. Tip: consider analysis with SMT disabled def Core_Bound_Likely(self, EV, level): val = 100 *(1 - self.Core_Bound.compute(EV) / self.Ports_Utilization.compute(EV) if self.Core_Bound.compute(EV)< self.Ports_Utilization.compute(EV) else 1) if SMT_2T_Utilization(self, EV, level)> 0.5 else 0 self.thresh = (val > 0.5) return val # Instructions Per Cycle (per Logical Processor) def IPC(self, EV, level): return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level) # Uops Per Instruction def UopPI(self, EV, level): val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level) self.thresh = (val > 1.05) return val # Uops per taken branch def UpTB(self, EV, level): val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level) self.thresh = val < Pipeline_Width * 1.5 return val # Cycles Per Instruction (per Logical Processor) def CPI(self, EV, level): return 1 / IPC(self, EV, level) # Per-Logical Processor actual clocks when the Logical Processor is active. def CLKS(self, EV, level): return EV("CPU_CLK_UNHALTED.THREAD", level) # Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward) def SLOTS(self, EV, level): return Pipeline_Width * CORE_CLKS(self, EV, level) # The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage. 
def Execute_per_Issue(self, EV, level):
    """Executed-to-issued uop ratio; >1 hints micro-fusion, <1 hints rename-stage 'execution'."""
    executed = EV("UOPS_EXECUTED.THREAD", level)
    issued = EV("UOPS_ISSUED.ANY", level)
    return executed / issued

def CoreIPC(self, EV, level):
    """Instructions Per Cycle across hyper-threads (per physical core)."""
    return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)

def FLOPc(self, EV, level):
    """Floating Point Operations Per Cycle (per physical core)."""
    return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level)

def FP_Arith_Utilization(self, EV, level):
    """Per-core usage of the non-X87 FP execution units.

    Values > 1 are possible since Fused-Multiply-Add counts twice.
    """
    scalar_ops = FP_Arith_Scalar(self, EV, level)
    vector_ops = FP_Arith_Vector(self, EV, level)
    return (scalar_ops + vector_ops) / (2 * CORE_CLKS(self, EV, level))

def ILP(self, EV, level):
    """Average uops executed per cycle with at least one uop executing (per logical processor)."""
    total_uops = EV("UOPS_EXECUTED.THREAD", level)
    busy_cycles = EV("UOPS_EXECUTED.THREAD:c1", level)
    return total_uops / busy_cycles

def EPC(self, EV, level):
    """Uops executed per cycle."""
    return EV("UOPS_EXECUTED.THREAD", level) / CLKS(self, EV, level)

def CORE_CLKS(self, EV, level):
    """Core clocks while any logical processor on the physical core is active."""
    if ebs_mode:
        # Event-based-sampling mode: scale per-thread clocks by the
        # one-thread-active ratio to estimate core clocks.
        thread_clks = EV("CPU_CLK_UNHALTED.THREAD", level)
        scale = 1 + EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_XCLK", level)
        return (thread_clks / 2) * scale
    if smt_enabled:
        return EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2
    return CLKS(self, EV, level)

# Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
    """Instructions per retired load; a low value means frequent loads. Tip: reduce memory accesses."""
    ratio = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_LOADS", level)
    self.thresh = (ratio < 3)
    return ratio

# Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
    """Instructions per retired store; a low value means frequent stores. Tip: reduce memory accesses."""
    ratio = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)
    self.thresh = (ratio < 8)
    return ratio

def IpBranch(self, EV, level):
    """Instructions per retired branch (lower number means higher occurrence rate)."""
    ratio = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    self.thresh = (ratio < 8)
    return ratio

def IpCall(self, EV, level):
    """Instructions per retired near call (lower number means higher occurrence rate)."""
    ratio = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
    self.thresh = (ratio < 200)
    return ratio

def IpTB(self, EV, level):
    """Instructions per taken branch."""
    ratio = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = ratio < Pipeline_Width * 2 + 1
    return ratio

def BpTkBranch(self, EV, level):
    """Branch instructions per taken branch; approximates PGO-likelihood for non-loopy code."""
    return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)

def IpFLOP(self, EV, level):
    """Instructions per FP operation. Reference: https://doi.org/10.1109/LCA.2019.2916408"""
    ratio = EV("INST_RETIRED.ANY", level) / FLOP_Count(self, EV, level)
    self.thresh = (ratio < 10)
    return ratio

def IpArith(self, EV, level):
    """Instructions per FP arithmetic instruction.

    Values < 1 are possible due to intentional FMA double counting.
    Approximated prior to BDW.
    """
    insts = EV("INST_RETIRED.ANY", level)
    arith_ops = FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level)
    ratio = insts / arith_ops
    self.thresh = (ratio < 10)
    return ratio

# Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_SP(self, EV, level):
    """Instructions per scalar single-precision FP arithmetic instruction.

    Values < 1 are possible due to intentional FMA double counting.
    """
    ratio = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level)
    self.thresh = (ratio < 10)
    return ratio

def IpArith_Scalar_DP(self, EV, level):
    """Instructions per scalar double-precision FP arithmetic instruction.

    Values < 1 are possible due to intentional FMA double counting.
    """
    ratio = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level)
    self.thresh = (ratio < 10)
    return ratio

def IpArith_AVX128(self, EV, level):
    """Instructions per AVX/SSE 128-bit packed FP arithmetic instruction."""
    insts = EV("INST_RETIRED.ANY", level)
    packed = EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level)
    ratio = insts / packed
    self.thresh = (ratio < 10)
    return ratio

def IpArith_AVX256(self, EV, level):
    """Instructions per AVX* 256-bit packed FP arithmetic instruction."""
    insts = EV("INST_RETIRED.ANY", level)
    packed = EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)
    ratio = insts / packed
    self.thresh = (ratio < 10)
    return ratio

def IpArith_AVX512(self, EV, level):
    """Instructions per AVX 512-bit packed FP arithmetic instruction."""
    insts = EV("INST_RETIRED.ANY", level)
    packed = EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level)
    ratio = insts / packed
    self.thresh = (ratio < 10)
    return ratio

def IpSWPF(self, EV, level):
    """Instructions per software prefetch of any type (NTA/T0/T1/T2/Prefetch)."""
    ratio = EV("INST_RETIRED.ANY", level) / EV("SW_PREFETCH_ACCESS.T0:u0xF", level)
    self.thresh = (ratio < 100)
    return ratio

def Instructions(self, EV, level):
    """Total number of retired instructions."""
    return EV("INST_RETIRED.ANY", level)

def Retire(self, EV, level):
    """Average uops retired per cycle in which at least one uop retired."""
    return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level)

def IpAssist(self, EV, level):
    """Instructions per microcode-Assist invocation; see the Assists tree node."""
    insts = EV("INST_RETIRED.ANY", level)
    assists = EV("FP_ASSIST.ANY", level) + EV("OTHER_ASSISTS.ANY", level)
    ratio = insts / assists
    self.thresh = (ratio < 100000)
    return ratio

def Execute(self, EV, level):
    """Uops executed per cycle in which execution happened."""
    return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level)

def Fetch_DSB(self, EV, level):
    """Average uops fetched from the DSB per DSB-active cycle."""
    return EV("IDQ.DSB_UOPS", level) / EV("IDQ.DSB_CYCLES_ANY", level)

def Fetch_MITE(self, EV, level):
    """Average uops fetched from MITE per MITE-active cycle."""
    return EV("IDQ.MITE_UOPS", level) / EV("IDQ.MITE_CYCLES", level)

def Fetch_UpC(self, EV, level):
    """Average uops issued by the front-end per cycle it issued anything."""
    return EV("UOPS_ISSUED.ANY", level) / EV("UOPS_ISSUED.ANY:c1", level)

# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual.
# http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    # Only flagged when the workload also sustains high IPC.
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val

# Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.
def DSB_Switch_Cost(self, EV, level):
    return EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", level) / EV("DSB2MITE_SWITCHES.COUNT", level)

# Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Misses(self, EV, level):
    # Fetch_Latency share attributed to DSB switches, plus Fetch_Bandwidth
    # share attributed to the MITE pipeline.
    val = 100 *(self.Fetch_Latency.compute(EV) * self.DSB_Switches.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) + self.Fetch_Bandwidth.compute(EV) * self.MITE.compute(EV) / (self.MITE.compute(EV) + self.DSB.compute(EV)))
    self.thresh = (val > 10)
    return val

# Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Bandwidth(self, EV, level):
    val = 100 *(self.Frontend_Bound.compute(EV) * (self.Fetch_Bandwidth.compute(EV) / (self.Fetch_Bandwidth.compute(EV) + self.Fetch_Latency.compute(EV))) * (self.DSB.compute(EV) / (self.MITE.compute(EV) + self.DSB.compute(EV))))
    self.thresh = (val > 10)
    return val

# Average Latency for L1 instruction cache misses
def ICache_Miss_Latency(self, EV, level):
    # NOTE(review): ICACHE_16B.* / FRONTEND_RETIRED.* are SKL-generation
    # event names although the header says Haswell - presumably the
    # generator's model selection; verify against the perfmon event list.
    return EV("ICACHE_16B.IFDATA_STALL", level) / EV("ICACHE_16B.IFDATA_STALL:c1:e1", level) + 2

# Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.
def IC_Misses(self, EV, level):
    val = 100 *(self.Fetch_Latency.compute(EV) * self.ICache_Misses.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
    self.thresh = (val > 5)
    return val

# Instructions per non-speculative DSB miss (lower number means higher occurrence rate)
def IpDSB_Miss_Ret(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FRONTEND_RETIRED.ANY_DSB_MISS", level)
    self.thresh = (val < 50)
    return val

# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
    return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)

# L2 cache true code cacheline misses per kilo instruction
def L2MPKI_Code(self, EV, level):
    return 1000 * EV("FRONTEND_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache speculative code cacheline misses per kilo instruction
def L2MPKI_Code_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.CODE_RD_MISS", level) / EV("INST_RETIRED.ANY", level)

# Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 200)
    return val

# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    # Retire_Fraction scales the speculative BR_MISP_EXEC count down to a
    # retired estimate.
    val = Instructions(self, EV, level) / (Retire_Fraction(self, EV, level) * EV("BR_MISP_EXEC.INDIRECT", level))
    self.thresh = (val < 1000)
    return val

# Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)
def Branch_Misprediction_Cost(self, EV, level):
    return Mispredictions(self, EV, level) * SLOTS(self, EV, level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / 100

# Speculative to Retired ratio of all clears (covering Mispredicts and nukes)
def Spec_Clears_Ratio(self, EV, level):
    return EV("INT_MISC.CLEARS_COUNT", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))

# Fraction of branches that are non-taken conditionals
def Cond_NT(self, EV, level):
    return EV("BR_INST_RETIRED.NOT_TAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are taken conditionals
def Cond_TK(self, EV, level):
    return (EV("BR_INST_RETIRED.CONDITIONAL", level) - EV("BR_INST_RETIRED.NOT_TAKEN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are CALL or RET
def CallRet(self, EV, level):
    return (EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("BR_INST_RETIRED.NEAR_RETURN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are unconditional (direct or indirect) jumps
def Jump(self, EV, level):
    return Br_DoI_Jumps(self, EV, level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / (EV("MEM_LOAD_RETIRED.L1_MISS", level) + EV("MEM_LOAD_RETIRED.FB_HIT", level))

# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss.
# Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)

# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)

# L1 cache true misses per kilo instruction for all demand loads (including speculative)
def L1MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.ALL_DEMAND_DATA_RD", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache misses per kilo instruction for all request types (including speculative)
def L2MPKI_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache misses per kilo instruction for all demand loads (including speculative)
def L2MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level) / EV("INST_RETIRED.ANY", level)

# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("OFFCORE_REQUESTS.DEMAND_RFO", level) / EV("INST_RETIRED.ANY", level)

# L2 cache hits per kilo instruction for all request types (including speculative)
def L2HPKI_All(self, EV, level):
    # Hits derived as references minus misses.
    return 1000 *(EV("L2_RQSTS.REFERENCES", level) - EV("L2_RQSTS.MISS", level)) / EV("INST_RETIRED.ANY", level)

# L2 cache hits per kilo instruction for all demand loads (including speculative)
def L2HPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level) / EV("INST_RETIRED.ANY", level)

# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)

# Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)
def FB_HPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("INST_RETIRED.ANY", level)

# Cache fill bandwidths in GB/s: 64 bytes per filled line.
def L1D_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)

def L2_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)

def L3_Cache_Fill_BW(self, EV, level):
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)

def L3_Cache_Access_BW(self, EV, level):
    return 64 * EV("OFFCORE_REQUESTS.ALL_REQUESTS", level) / OneBillion / Time(self, EV, level)

# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    val = (EV("ITLB_MISSES.WALK_PENDING", level) + EV("DTLB_LOAD_MISSES.WALK_PENDING", level) + EV("DTLB_STORE_MISSES.WALK_PENDING", level) + EV("EPT.WALK_PENDING", level)) / (2 * CORE_CLKS(self, EV, level))
    self.thresh = (val > 0.5)
    return val

# STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Code_STLB_MPKI(self, EV, level):
    return 1000 * EV("ITLB_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Load_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Store_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    # Simple per-core aliases of the corresponding per-thread bandwidths.
    return L1D_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    return L2_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    return L3_Cache_Fill_BW(self, EV, level)

# Average per-core data access bandwidth to the L3 cache [GB / sec]
def L3_Cache_Access_BW_2T(self, EV, level):
    return L3_Cache_Access_BW(self, EV, level)

# Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)
def L2_Evictions_Silent_PKI(self, EV, level):
    return 1000 * EV("L2_LINES_OUT.SILENT", level) / Instructions(self, EV, level)

# Rate of non silent evictions from the L2 cache per Kilo instruction
def L2_Evictions_NonSilent_PKI(self, EV, level):
    return 1000 * EV("L2_LINES_OUT.NON_SILENT", level) / Instructions(self, EV, level)

# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)

# Un-cacheable retired load per kilo instruction
def UC_Load_PKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_MISC_RETIRED.UC", level) / EV("INST_RETIRED.ANY", level)

# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)

# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    # Ratio of unhalted reference cycles to wall-clock TSC ticks.
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)

# Measured Average Core Frequency for unhalted processors
# [GHz]
def Core_Frequency(self, EV, level):
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)

# Measured Average Uncore Frequency for the SoC [GHz]
def Uncore_Frequency(self, EV, level):
    # NOTE(review): uncore (UNC_CHA_*/UNC_M_*) events below are server-style
    # names in a client-model file - presumably generator boilerplate; they
    # may be unavailable on actual Haswell client parts. Verify before use.
    return Socket_CLKS(self, EV, level) / 1e9 / Time(self, EV, level)

# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)

# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)

# Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.
def Power_License0_Utilization(self, EV, level):
    # With SMT the per-thread count is halved to get the per-core fraction.
    return EV("CORE_POWER.LVL0_TURBO_LICENSE", level) / 2 / CORE_CLKS(self, EV, level) if smt_enabled else EV("CORE_POWER.LVL0_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level)

# Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.
def Power_License1_Utilization(self, EV, level):
    val = EV("CORE_POWER.LVL1_TURBO_LICENSE", level) / 2 / CORE_CLKS(self, EV, level) if smt_enabled else EV("CORE_POWER.LVL1_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level)
    self.thresh = (val > 0.5)
    return val

# Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions.
def Power_License2_Utilization(self, EV, level):
    val = EV("CORE_POWER.LVL2_TURBO_LICENSE", level) / 2 / CORE_CLKS(self, EV, level) if smt_enabled else EV("CORE_POWER.LVL2_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level)
    self.thresh = (val > 0.5)
    return val

# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / (EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2) if smt_enabled else 0

# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val

# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)

# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    # 64 bytes per CAS transaction.
    return (64 *(EV("UNC_M_CAS_COUNT.RD", level) + EV("UNC_M_CAS_COUNT.WR", level)) / OneBillion) / Time(self, EV, level)

# Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches.
def MEM_Read_Latency(self, EV, level):
    return OneBillion *(EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD", level) / EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD", level)) / (Socket_CLKS(self, EV, level) / Time(self, EV, level))

# Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches
def MEM_Parallel_Reads(self, EV, level):
    return EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD", level) / EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD:c1", level)

# Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches
def MEM_DRAM_Read_Latency(self, EV, level):
    return OneBillion *(EV("UNC_M_RPQ_OCCUPANCY", level) / EV("UNC_M_RPQ_INSERTS", level)) / EV("UNC_M_CLOCKTICKS:one_unit", level)

# Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU
def IO_Read_BW(self, EV, level):
    return (EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0", level) + EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1", level) + EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2", level) + EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3", level)) * 4 / OneBillion / Time(self, EV, level)

# Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU
def IO_Write_BW(self, EV, level):
    return (EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0", level) + EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1", level) + EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2", level) + EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3", level)) * 4 / OneBillion / Time(self, EV, level)

# Run duration time in seconds
def Time(self, EV, level):
    val = EV("interval-s", 0)
    # Flag intervals shorter than one second as potentially noisy.
    self.thresh = (val < 1)
    return val

# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    return EV("UNC_CHA_CLOCKTICKS:one_unit", level)

# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = (val < 1000000)
    return val

# Event groups

class Frontend_Bound:
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_4:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB',
'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    def compute(self, EV):
        # Slots the front-end failed to deliver while the back-end could accept.
        try:
            self.val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."""


class Fetch_Latency:
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_16:pp', 'FRONTEND_RETIRED.LATENCY_GE_8:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Cycles with zero uops delivered, scaled to slots.
        try:
            self.val = Pipeline_Width * EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction- cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period."""


class ICache_Misses:
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.L2_MISS:pp', 'FRONTEND_RETIRED.L1I_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("ICACHE_16B.IFDATA_STALL", 3) + 2 * EV("ICACHE_16B.IFDATA_STALL:c1:e1", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.. Using compiler's Profile-Guided Optimization (PGO) can reduce i-cache misses through improved hot code layout."""


class ITLB_Misses:
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.STLB_MISS:pp', 'FRONTEND_RETIRED.ITLB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ICACHE_TAG.STALLS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.. Consider large 2M pages for code (selectively prefer hot large-size function, due to limited 2M entries). Linux options: standard binaries use libhugetlbfs; Hfsort.. https://github. com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public ations/optimizing-function-placement-for-large-scale-data- center-applications-2/"""


class Branch_Resteers:
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        # Resteer cycles plus the Unknown_Branches sub-node estimate.
        try:
            self.val = EV("INT_MISC.CLEAR_RESTEER_CYCLES", 3) / CLKS(self, EV, 3) + self.Unknown_Branches.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings."""


class Mispredicts_Resteers:
    name = "Mispredicts_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 4) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Mispredicts_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage."""


class Clears_Resteers:
    name = "Clears_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'MachineClears'])
    maxval = None
    def compute(self, EV):
        # Complement of Mispredicts_Resteers over the same resteer cycles.
        try:
            self.val = (1 - Mispred_Clears_Fraction(self, EV, 4)) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Clears_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears."""


class Unknown_Branches:
    name = "Unknown_Branches"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['BACLEARS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        # Fixed per-BAClear penalty model (BAClear_Cost cycles each).
        try:
            self.val = BAClear_Cost * EV("BACLEARS.ANY", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Unknown_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches"""


class MS_Switches:
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        # Fixed per-switch penalty model (MS_Switches_Cost cycles each).
        try:
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines.
Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals."""


class LCP:
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DECODE.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this."""


class DSB_Switches:
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.DSB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.. See section 'Optimization for Decoded Icache' in Optimization Manual:. http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html"""


class Fetch_Bandwidth:
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_2:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Derived as the Frontend_Bound remainder after Fetch_Latency.
        # NOTE(review): unlike the sibling nodes, thresh does not include
        # self.parent.thresh here - presumably intentional in the generator.
        try:
            self.val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend."""


class MITE:
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.ANY_DSB_MISS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        # Cycles MITE delivered some but not the full 4 uops, halved per core.
        try:
            self.val = (EV("IDQ.ALL_MITE_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_MITE_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck..
Consider tuning codegen of 'small hotspots' that can fit in
DSB. Read about 'Decoded ICache' in Optimization Manual:.
http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""

# Cycles where decoder-0 was the only decoder producing uops.
class Decoder0_Alone:
    name = "Decoder0_Alone"
    domain = "Slots_Estimated"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("INST_DECODED.DECODERS:c1", 4) - EV("INST_DECODED.DECODERS:c2", 4)) / CORE_CLKS(self, EV, 4) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Decoder0_Alone zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where decoder-0
was the only active decoder"""

# Core cycles limited by the DSB (decoded uop cache) fetch pipeline:
# DSB active but not delivering its optimal uop rate.
class DSB:
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.DSB_CYCLES_ANY", 3) - EV("IDQ.DSB_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to DSB (decoded uop cache) fetch
pipeline. For example; inefficient utilization of the DSB
cache structure or bank conflict when reading from it; are
categorized here."""

# Top-level: slots wasted on wrong-path uops plus recovery bubbles.
class Bad_Speculation:
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            # issued-but-not-retired uops + Pipeline_Width slots per
            # recovery cycle, normalized to total slots.
            self.val = (EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""

# Mispredict share of Bad_Speculation.
class Branch_Mispredicts:
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction. These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""

# Mispredict slots not explained by retired x86 branch mispredicts;
# clamped to a small positive floor.
class Other_Mispredicts:
    name = "Other_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'BrMispredicts'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(self.Branch_Mispredicts.compute(EV) * (1 - EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) / (EV("INT_MISC.CLEARS_COUNT", 3) - EV("MACHINE_CLEARS.COUNT", 3))) , 0.0001 )
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Mispredicts zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU was stalled
due to other cases of misprediction (non-retired x86
branches or other types)."""

# Machine-clear share of Bad_Speculation (remainder after mispredicts).
class Machine_Clears:
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears. These slots are either wasted by uops
fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:.
https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""

# Machine-clear slots not caused by memory ordering; clamped to a
# small positive floor.
# NOTE(review): group name 'Machine_Clears' differs from the
# 'MachineClears' group used by sibling nodes — comes from the
# upstream TMA tables; confirm before relying on group filtering.
class Other_Nukes:
    name = "Other_Nukes"
    domain = "Slots"
    area = "BAD"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'Machine_Clears'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(self.Machine_Clears.compute(EV) * (1 - EV("MACHINE_CLEARS.MEMORY_ORDERING", 3) / EV("MACHINE_CLEARS.COUNT", 3)) , 0.0001 )
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Nukes zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Nukes (Machine Clears) not related to memory
ordering."""

# Top-level: slots not frontend, not issued, not recovery.
class Backend_Bound:
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 1 - self.Frontend_Bound.compute(EV) - (EV("UOPS_ISSUED.ANY", 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. Backend is the portion of
the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""

# Memory share of Backend_Bound.
class Memory_Bound:
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck. Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""

# Memory stalls without an outstanding L1D miss.
class L1_Bound:
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT:pp', 'MEM_LOAD_RETIRED.FB_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max((EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3) , 0 )
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled without
loads missing the L1 data cache. The L1 data cache typically
has the shortest latency. However; in certain cases like
loads blocked on older stores; a load might suffer due to
high latency even though it is being satisfied by the L1.
Another example is loads who miss in the TLB. These cases
are characterized by execution unit stalls; while some non-
completed demand load lives in the machine without having
that demand load missing the L1 cache."""

# Load DTLB miss cost: STLB hits at Mem_STLB_Hit_Cost each plus page
# walk cycles, capped by the observed memory-vs-L1-miss cycle gap.
class DTLB_Load:
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = min(Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT:c1", 4) + EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 4) , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("CYCLE_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages).
This includes hitting in the second-level TLB (STLB) as well
as performing a hardware page walk on an STLB miss.."""

# DTLB_Load minus the page-walk part = STLB-hit part.
class Load_STLB_Hit:
    name = "Load_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = self.DTLB_Load.compute(EV) - self.Load_STLB_Miss.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Hit zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the (first level) DTLB was missed by load accesses, that
later on hit in second-level TLB (STLB)"""

# Cycles with an active hardware page walk for loads.
class Load_STLB_Miss:
    name = "Load_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Miss zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles where the
Second-level TLB (STLB) was missed by load accesses,
performing a hardware page walk"""

# Loads blocked on store-forwarding, costed at 13 cycles each.
class Store_Fwd_Blk:
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles when the
memory subsystem had loads blocked since they could not
forward data from earlier (in program order) overlapping
stores. To streamline memory operations in the pipeline; a
load can avoid waiting for memory if a prior in-flight store
is writing the data that the load wants to read (store
forwarding process). However; in some cases the load may be
blocked for a significant time pending the store forward.
For example; when the prior store is writing a smaller
region than the load is reading."""

# Demand-load L1 hits weighted by dependent-load fraction, capped by
# the observed memory-vs-L1-miss cycle gap.
class L1_Hit_Latency:
    name = "L1_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = min(2 *(EV("MEM_INST_RETIRED.ALL_LOADS", 4) - EV("MEM_LOAD_RETIRED.FB_HIT", 4) - EV("MEM_LOAD_RETIRED.L1_MISS", 4)) * Dependent_Loads_Weight(self, EV, 4) / 100 , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("CYCLE_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Hit_Latency zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles with demand
load accesses that hit the L1 cache. The short latency of
the L1 data cache may be exposed in pointer-chasing memory
access patterns as an example."""

# Cycles handling cache misses caused by lock operations.
class Lock_Latency:
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.LOCK_LOADS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (12 * max(0 , EV("MEM_INST_RETIRED.LOCK_LOADS", 4) - EV("L2_RQSTS.ALL_RFO", 4)) + Mem_Lock_St_Fraction(self, EV, 4) * (Mem_L2_Store_Cost * EV("L2_RQSTS.RFO_HIT", 4) + ORO_Demand_RFO_C1(self, EV, 4))) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU spent
handling cache misses due to lock operations. Due to the
microarchitecture handling of locks; they are classified as
L1_Bound regardless of what memory source satisfied them."""

# Cache-line-crossing loads (LD_BLOCKS.NO_SR) at average miss latency.
class Split_Loads:
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles handling memory
load split accesses - load that cross 64-byte cache line
boundary. . Consider aligning data or hot structure fields.
See the Optimization Manual for more details"""

# 4K-aliasing load re-issues. Class name is prefixed with 'G' only
# because a Python identifier cannot start with a digit; the reported
# metric name stays "4K_Aliasing".
class G4K_Aliasing:
    name = "4K_Aliasing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "G4K_Aliasing zero division")
        return self.val
    desc = """
This metric estimates how often memory load accesses were
aliased by preceding stores (in program order) with a 4K
address offset. False match is possible; which incur a few
cycles load re-issue. However; the short re-issue duration
is often hidden by the out-of-order core and HW
optimizations; hence a user may safely ignore a high value
of this metric unless it manages to propagate up into parent
nodes of the hierarchy (e.g. to L1_Bound).. Consider
reducing independent loads/stores accesses with 4K offsets.
See the Optimization Manual for more details"""

# Fill-buffer-full cycles at average miss latency. Note: thresh does
# not AND with parent (flagged independently).
class FB_Full:
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("L1D_PEND_MISS.FB_FULL:c1", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val
    desc = """
This metric does a *rough estimation* of how often L1D Fill
Buffer unavailability limited additional L1D miss memory
access requests to proceed. The higher the metric value; the
deeper the memory hierarchy level the misses are satisfied
from (metric values >1 are valid). Often it hints on
approaching bandwidth limits (to L2 cache; L3 cache or
external memory).. See $issueBW and $issueSL hints. Avoid
software prefetches if indeed memory BW limited."""

# L2-hit share of the L2 stall ratio (scaled down by FB-full cycles).
class L2_Bound:
    name = "L2_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L2_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (LOAD_L2_HIT(self, EV, 3) / (LOAD_L2_HIT(self, EV, 3) + EV("L1D_PEND_MISS.FB_FULL:c1", 3))) * L2_Bound_Ratio(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L2_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
L2 cache accesses by loads. Avoiding cache misses (i.e. L1
misses/L2 hits) can improve the latency and increase
performance."""

# Stalls on L2 miss that did not also miss L3.
class L3_Bound:
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3) - EV("CYCLE_ACTIVITY.STALLS_L3_MISS", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
loads accesses to L3 cache or contended with a sibling Core.
Avoiding cache misses (i.e. L2 misses/L3 hits) can improve
the latency and increase performance."""

# Cross-core snoop HITM/MISS loads, costed at (snoop cost - L2-hit
# cost) and scaled by the fill-buffer factor.
class Contested_Accesses:
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = ((Mem_XSNP_HitM_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HITM(self, EV, 4) + (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_MISS(self, EV, 4)) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to contested
accesses. Contested accesses occur when data written by one
Logical Processor are read by another Logical Processor on a
different Physical Core. Examples of contested accesses
include synchronizations such as locks; true data sharing
such as modified locked variables; and false sharing."""

# Clean cross-core snoop-hit loads (read sharing).
class Data_Sharing:
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HIT(self, EV, 4) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to data-sharing
accesses.
Data shared by multiple Logical Processors (even just read
shared) may cause increased access latency due to cache
coherency. Excessive data sharing can drastically harm
multithreaded performance."""

# Unloaded L3-hit latency: L3 hits costed at (L3 cost - L2-hit cost).
class L3_Hit_Latency:
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_XSNP_None_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles with demand load
accesses that hit the L3 cache under unloaded scenarios
(possibly L3 latency limited). Avoiding private cache misses
(i.e. L2 misses/L3 hits) will improve the latency; reduce
contention with sibling physical cores and increase
performance. Note the value of this node may overlap with
its siblings."""

# Super Queue full cycles (core-level, both SMT threads).
class SQ_Full:
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = SQ_Full_Cycles(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """
This metric measures fraction of cycles where the Super
Queue (SQ) was full taking into account all request-types
and both hardware SMT threads (Logical Processors)."""

# Stalls on loads that went to external memory.
class DRAM_Bound:
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = MEM_Bound_Ratio(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled on
accesses to external memory (DRAM) by loads. Better caching
can improve the latency and increase performance."""

# Cycles with many outstanding demand data reads (BW-limited heuristic).
class MEM_Bandwidth:
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the core's
performance was likely hurt due to approaching bandwidth
limits of external memory - DRAM ([SPR-HBM] and/or HBM). The
underlying heuristic assumes that a similar off-core traffic
is generated by all IA cores. This metric does not aggregate
non-data-read requests by this logical processor; requests
from other IA Logical Processors/Physical Cores/sockets; or
other non-IA devices like GPU; hence the maximum external
memory bandwidth limits may or may not be approached when
this metric is flagged (see Uncore counters for that)..
Improve data accesses to reduce cacheline transfers from/to
memory. Examples: 1) Consume all bytes of a each cacheline
before it is evicted (e.g. reorder structure elements and
split non-hot ones), 2) merge computed-limited with BW-
limited loops, 3) NUMA optimizations in multi-socket system.
Note: software prefetches will not help BW-limited
application.."""

# Remaining outstanding-demand-read cycles after the BW part.
class MEM_Latency:
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the
performance was likely hurt due to latency from external
memory - DRAM ([SPR-HBM] and/or HBM). This metric does not
aggregate requests from other Logical Processors/Physical
Cores/sockets (see Uncore counters for that).. Improve data
accesses or interleave them with compute.
Examples: 1) Data layout re-structuring, 2) Software
Prefetches (also through the compiler).."""

# Local-node DRAM loads at (local-DRAM cost - L3-hit cost).
class Local_MEM:
    name = "Local_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_Local_DRAM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", 5) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Local_MEM zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from local memory. Caching will
improve the latency and increase performance."""

# Remote-node DRAM loads; only meaningful on multi-socket (DS) setups.
class Remote_MEM:
    name = "Remote_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_Remote_DRAM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", 5) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) if DS else 0
            # bare EV() call keeps the event scheduled even when DS is 0
            # (same pattern as Few_Uops_Executed_Threshold in the helpers)
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_MEM zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from remote memory. This is
caused often due to non-optimal NUMA allocations."""

# Remote-socket cache hits (HITM/FWD); only meaningful when DS is set.
class Remote_Cache:
    name = "Remote_Cache"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM:pp', 'MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore', 'Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = ((Mem_Remote_HitM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", 5) + (Mem_Remote_Fwd_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", 5)) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) if DS else 0
            # bare EV() calls keep the events scheduled even when DS is 0
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", 5)
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_Cache zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from remote cache in other
sockets including synchronizations issues. This is caused
often due to non-optimal NUMA allocations."""

# Cycles bound on RFO stores.
class Store_Bound:
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_INST_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.BOUND_ON_STORES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to RFO
store memory accesses; RFO store issue a read-for-ownership
request before the write. Even though store accesses do not
typically stall out-of-order CPUs; there are few cases where
stores can lead to actual stalls. This metric will be
flagged should RFO stores be a bottleneck."""

# L1D store-miss handling cycles (excluding the locked-store share).
class Store_Latency:
    name = "Store_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU spent
handling L1D store misses. Store accesses usually less
impact out-of-order core performance; however; holding
resources for longer time can lead into undesired
implications (e.g. contention on L1D fill-buffer entries -
see FB_Full). Consider to avoid/reduce unnecessary (or
easily load-able/computable) memory store."""

# RFO snoop-HITM costs; remote-HITM term only when DS (multi-socket).
class False_Sharing:
    name = "False_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM:pp', 'OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_Remote_HitM_Cost(self, EV, 4) * OCR_all_rfo_l3_miss_remote_hitm(self, EV, 4) + Mem_XSNP_HitM_Cost(self, EV, 4) * OCR_all_rfo_l3_hit_snoop_hitm(self, EV, 4)) / CLKS(self, EV, 4) if DS else Mem_XSNP_HitM_Cost(self, EV, 4) * OCR_all_rfo_l3_hit_snoop_hitm(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "False_Sharing zero division")
        return self.val
    desc = """
This metric roughly estimates how often CPU was handling
synchronizations due to False Sharing.
False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. . False Sharing can be easily avoided by padding to make Logical Processors access different lines.""" class Split_Stores: name = "Split_Stores" domain = "Core_Utilization" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.SPLIT_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("MEM_INST_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Split_Stores zero division") return self.val desc = """ This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity.""" class DTLB_Store: name = "DTLB_Store" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.STLB_MISS_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMT', 'MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT:c1", 4) + EV("DTLB_STORE_MISSES.WALK_ACTIVE", 4)) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DTLB_Store zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. 
Try using larger page sizes for large amounts of frequently- used data.""" class Store_STLB_Hit: name = "Store_STLB_Hit" domain = "Clocks_Estimated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = self.DTLB_Store.compute(EV) - self.Store_STLB_Miss.compute(EV) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_STLB_Hit zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second- level TLB (STLB)""" class Store_STLB_Miss: name = "Store_STLB_Miss" domain = "Clocks_Calculated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = EV("DTLB_STORE_MISSES.WALK_ACTIVE", 5) / CORE_CLKS(self, EV, 5) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_STLB_Miss zero division") return self.val desc = """ This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk""" class Core_Bound: name = "Core_Bound" domain = "Slots" area = "BE/Core" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Backend', 'TmaL2', 'Compute']) maxval = None def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Core_Bound zero division") return self.val desc = """ This metric represents fraction of slots where Core non- memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. 
Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).. Tip: consider Port Saturation analysis as next step.""" class Divider: name = "Divider" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = ['ARITH.DIVIDER_ACTIVE'] errcount = 0 sibling = None metricgroup = frozenset(['BvCB']) maxval = 1.0 def compute(self, EV): try: self.val = EV("ARITH.DIVIDER_ACTIVE", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Divider zero division") return self.val desc = """ This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.""" class Serializing_Operation: name = "Serializing_Operation" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = ['PARTIAL_RAT_STALLS.SCOREBOARD'] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'PortsUtil']) maxval = None def compute(self, EV): try: self.val = EV("PARTIAL_RAT_STALLS.SCOREBOARD", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Serializing_Operation zero division") return self.val desc = """ This metric represents fraction of cycles the CPU issue- pipeline was stalled due to serializing operations. 
Instructions like CPUID; WRMSR or LFENCE serialize the out- of-order execution which may limit performance.""" class Ports_Utilization: name = "Ports_Utilization" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Core_Bound_Cycles(self, EV, 3) / CLKS(self, EV, 3) if (EV("ARITH.DIVIDER_ACTIVE", 3)<(EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) - EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3))) else Few_Uops_Executed_Threshold(self, EV, 3) / CLKS(self, EV, 3) EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3) EV("ARITH.DIVIDER_ACTIVE", 3) EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilization zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.. 
Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop.""" class Ports_Utilized_0: name = "Ports_Utilized_0" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = EV("EXE_ACTIVITY.EXE_BOUND_0_PORTS", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_0 zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.. Check assembly view and Appendix C in Optimization Manual to find out instructions with say 5 or more cycles latency.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Mixing_Vectors: name = "Mixing_Vectors" domain = "Clocks" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = 1.0 def compute(self, EV): try: self.val = EV("UOPS_ISSUED.VECTOR_WIDTH_MISMATCH", 5) / EV("UOPS_ISSUED.ANY", 5) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Mixing_Vectors zero division") return self.val desc = """ This metric estimates penalty in terms of percentage of injected blend uops out of all Uops Issued -- the Count Domain. Usually a Mixing_Vectors over 5% is worth investigating. 
Read more in Appendix B1 of the Optimizations Guide for this topic.""" class Ports_Utilized_1: name = "Ports_Utilized_1" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_1_Port_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_1 zero division") return self.val desc = """ This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful.""" class Ports_Utilized_2: name = "Ports_Utilized_2" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_2_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_2 zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). 
Loop Vectorization -most compilers feature auto- Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop.""" class Ports_Utilized_3m: name = "Ports_Utilized_3m" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvCB', 'PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_3m_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.4) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_3m zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).""" class ALU_Op_Utilization: name = "ALU_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("UOPS_DISPATCHED_PORT.PORT_0", 5) + EV("UOPS_DISPATCHED_PORT.PORT_1", 5) + EV("UOPS_DISPATCHED_PORT.PORT_5", 5) + EV("UOPS_DISPATCHED_PORT.PORT_6", 5)) / (4 * CORE_CLKS(self, EV, 5)) self.thresh = (self.val > 0.4) except ZeroDivisionError: handle_error(self, "ALU_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.""" class Port_0: name = "Port_0" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_0'] errcount = 0 sibling = None metricgroup = frozenset(['Compute']) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_0", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_0 zero division") return self.val desc = """ This metric represents Core fraction of cycles 
CPU dispatched uops on execution port 0 ALU and 2nd branch""" class Port_1: name = "Port_1" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_1'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_1", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_1 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)""" class Port_5: name = "Port_5" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_5'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_5", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_5 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ALU. See section 'Handling Port 5 Pressure' in Optimization Manual:. 
http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Port_6: name = "Port_6" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_6'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_6", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_6 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 Primary Branch and simple ALU""" class Load_Op_Utilization: name = "Load_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("UOPS_DISPATCHED_PORT.PORT_2", 5) + EV("UOPS_DISPATCHED_PORT.PORT_3", 5) + EV("UOPS_DISPATCHED_PORT.PORT_7", 5) - EV("UOPS_DISPATCHED_PORT.PORT_4", 5)) / (2 * CORE_CLKS(self, EV, 5)) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Load_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations""" class Port_2: name = "Port_2" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_2'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_2", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_2 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 Loads and Store-address""" class Port_3: name = "Port_3" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample 
= ['UOPS_DISPATCHED_PORT.PORT_3'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_3", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_3 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 Loads and Store-address""" class Store_Op_Utilization: name = "Store_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 5) / CORE_CLKS(self, EV, 5) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Store_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations""" class Port_4: name = "Port_4" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_4'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_4 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)""" class Port_7: name = "Port_7" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_7'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_7", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_7 zero division") return self.val desc = """ This metric represents 
Core fraction of cycles CPU dispatched uops on execution port 7 simple Store-address""" class Retiring: name = "Retiring" domain = "Slots" area = "RET" level = 1 htoff = False sample = ['UOPS_RETIRED.RETIRE_SLOTS'] errcount = 0 sibling = None metricgroup = frozenset(['BvUW', 'TmaL1']) maxval = None def compute(self, EV): try: self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh except ZeroDivisionError: handle_error(self, "Retiring zero division") return self.val desc = """ This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. . A high Retiring value for non-vectorized code may be a good hint for programmer to consider vectorizing his code. 
Doing so essentially lets more computations be done without significantly increasing number of instructions thus improving the performance.""" class Light_Operations: name = "Light_Operations" domain = "Slots" area = "RET" level = 2 htoff = False sample = ['INST_RETIRED.PREC_DIST'] errcount = 0 sibling = None metricgroup = frozenset(['Retire', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Light_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. . Focus on techniques that reduce instruction count or result in more efficient instructions generation such as vectorization.""" class FP_Arith: name = "FP_Arith" domain = "Uops" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['HPC']) maxval = None def compute(self, EV): try: self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Arith zero division") return self.val desc = """ This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). 
Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.""" class X87_Use: name = "X87_Use" domain = "Uops" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute']) maxval = None def compute(self, EV): try: self.val = self.Retiring.compute(EV) * EV("UOPS_EXECUTED.X87", 4) / EV("UOPS_EXECUTED.THREAD", 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "X87_Use zero division") return self.val desc = """ This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.. Tip: consider compiler flags to generate newer AVX (or SSE) instruction sets; which typically perform better and feature vectors.""" class FP_Scalar: name = "FP_Scalar" domain = "Uops" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = None def compute(self, EV): try: self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Scalar zero division") return self.val desc = """ This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting.. 
Investigate what limits (compiler) generation of vector code.""" class FP_Vector: name = "FP_Vector" domain = "Uops" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector zero division") return self.val desc = """ This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting.. Check if vector width is expected""" class FP_Vector_128b: name = "FP_Vector_128b" domain = "Uops" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector_128b zero division") return self.val desc = """ This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting.. 
Try to exploit wider vector length""" class FP_Vector_256b: name = "FP_Vector_256b" domain = "Uops" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector_256b zero division") return self.val desc = """ This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting.. Try to exploit wider vector length""" class FP_Vector_512b: name = "FP_Vector_512b" domain = "Uops" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = (EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector_512b zero division") return self.val desc = """ This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. 
May overcount due to FMA double counting.""" class Memory_Operations: name = "Memory_Operations" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Pipeline']) maxval = None def compute(self, EV): try: self.val = self.Light_Operations.compute(EV) * EV("MEM_INST_RETIRED.ANY", 3) / EV("INST_RETIRED.ANY", 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Memory_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.""" class Fused_Instructions: name = "Fused_Instructions" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline']) maxval = None def compute(self, EV): try: self.val = self.Light_Operations.compute(EV) * EV("UOPS_RETIRED.MACRO_FUSED", 3) / Retired_Slots(self, EV, 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Fused_Instructions zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {}. 
See section 'Optimizing for Macro-fusion' in Optimization Manual:""" class Non_Fused_Branches: name = "Non_Fused_Branches" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline']) maxval = None def compute(self, EV): try: self.val = self.Light_Operations.compute(EV) * (EV("BR_INST_RETIRED.ALL_BRANCHES", 3) - EV("UOPS_RETIRED.MACRO_FUSED", 3)) / Retired_Slots(self, EV, 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Non_Fused_Branches zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non- conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.""" class Other_Light_Ops: name = "Other_Light_Ops" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Pipeline']) maxval = None def compute(self, EV): try: self.val = max(0 , self.Light_Operations.compute(EV) - Light_Ops_Sum(self, EV, 3)) self.thresh = (self.val > 0.3) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Other_Light_Ops zero division") return self.val desc = """ This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. 
May undercount due to FMA double counting""" class Nop_Instructions: name = "Nop_Instructions" domain = "Slots" area = "RET" level = 4 htoff = False sample = ['INST_RETIRED.NOP'] errcount = 0 sibling = None metricgroup = frozenset(['BvBO', 'Pipeline']) maxval = None def compute(self, EV): try: self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.NOP", 4) / Retired_Slots(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Nop_Instructions zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body.. Improve Codegen by correctly placing NOPs outside hot sections (e.g. outside loop body).""" class Heavy_Operations: name = "Heavy_Operations" domain = "Slots" area = "RET" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Retire', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = (Retired_Slots(self, EV, 2) + EV("UOPS_RETIRED.MACRO_FUSED", 2) - EV("INST_RETIRED.ANY", 2)) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.1) except ZeroDivisionError: handle_error(self, "Heavy_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. 
This highly-correlates with the uop length of these instructions/sequences.""" class Few_Uops_Instructions: name = "Few_Uops_Instructions" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Heavy_Operations.compute(EV) - self.Microcode_Sequencer.compute(EV) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Few_Uops_Instructions zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to four uops. This highly-correlates with the number of uops in such instructions.""" class Microcode_Sequencer: name = "Microcode_Sequencer" domain = "Slots" area = "RET" level = 3 htoff = False sample = ['IDQ.MS_UOPS'] errcount = 0 sibling = None metricgroup = frozenset(['MicroSeq']) maxval = None def compute(self, EV): try: self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Microcode_Sequencer zero division") return self.val desc = """ This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). 
These cases can often be avoided..""" class Assists: name = "Assists" domain = "Slots_Estimated" area = "RET" level = 4 htoff = False sample = ['OTHER_ASSISTS.ANY'] errcount = 0 sibling = None metricgroup = frozenset(['BvIO']) maxval = 1.0 def compute(self, EV): try: self.val = Avg_Assist_Cost *(EV("FP_ASSIST.ANY", 4) + EV("OTHER_ASSISTS.ANY", 4)) / SLOTS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Assists zero division") return self.val desc = """ This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases.""" class FP_Assists: name = "FP_Assists" domain = "Slots_Estimated" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['HPC']) maxval = None def compute(self, EV): try: self.val = 34 * EV("FP_ASSIST.ANY", 5) / SLOTS(self, EV, 5) self.thresh = (self.val > 0.1) except ZeroDivisionError: handle_error(self, "FP_Assists zero division") return self.val desc = """ This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).. Consider DAZ (Denormals Are Zero) and/or FTZ (Flush To Zero) options in your compiler; \"-ffast-math\" with -O2 in GCC for example. 
This option may improve performance if the denormal values are not critical in your application. Also note that the DAZ and FTZ modes are not compatible with the IEEE Standard 754.. https://www.intel.com/content/www/us/en/develop/docume ntation/vtune-help/top/reference/cpu-metrics-reference/bad- speculation-back-end-bound-pipeline-slots/fp-assists.html""" class CISC: name = "CISC" domain = "Slots" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV)) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "CISC zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. 
Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.""" class Metric_Mispredictions: name = "Mispredictions" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts', 'BvMP']) sibling = None def compute(self, EV): try: self.val = Mispredictions(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Mispredictions zero division") desc = """ Total pipeline cost of Branch Misprediction related bottlenecks""" class Metric_Big_Code: name = "Big_Code" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvBC', 'BigFootprint', 'Fed', 'Frontend', 'IcMiss', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Big_Code(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Big_Code zero division") desc = """ Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)""" class Metric_Instruction_Fetch_BW: name = "Instruction_Fetch_BW" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvFB', 'Fed', 'FetchBW', 'Frontend']) sibling = None def compute(self, EV): try: self.val = Instruction_Fetch_BW(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Instruction_Fetch_BW zero division") desc = """ Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)""" class Metric_Cache_Memory_Bandwidth: name = "Cache_Memory_Bandwidth" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvMB', 'Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Cache_Memory_Bandwidth(self, EV, 0) self.thresh = 
(self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Cache_Memory_Bandwidth zero division") desc = """ Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks""" class Metric_Cache_Memory_Latency: name = "Cache_Memory_Latency" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvML', 'Mem', 'MemoryLat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Cache_Memory_Latency(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Cache_Memory_Latency zero division") desc = """ Total pipeline cost of external Memory- or Cache-Latency related bottlenecks""" class Metric_Memory_Data_TLBs: name = "Memory_Data_TLBs" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvMT', 'Mem', 'MemoryTLB', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Memory_Data_TLBs(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Memory_Data_TLBs zero division") desc = """ Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)""" class Metric_Memory_Synchronization: name = "Memory_Synchronization" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvMS', 'Mem', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Memory_Synchronization(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "Memory_Synchronization zero division") desc = """ Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)""" class Metric_Compute_Bound_Est: name = "Compute_Bound_Est" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvCB', 'Cor']) sibling = None def compute(self, EV): try: self.val = Compute_Bound_Est(self, EV, 0) 
self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Compute_Bound_Est zero division") desc = """ Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy.""" class Metric_Irregular_Overhead: name = "Irregular_Overhead" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['Bad', 'BvIO', 'Cor', 'Ret']) sibling = None def compute(self, EV): try: self.val = Irregular_Overhead(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "Irregular_Overhead zero division") desc = """ Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments)""" class Metric_Other_Bottlenecks: name = "Other_Bottlenecks" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvOB', 'Cor', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Other_Bottlenecks(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Other_Bottlenecks zero division") desc = """ Total pipeline cost of remaining bottlenecks in the back- end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.""" class Metric_Branching_Overhead: name = "Branching_Overhead" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvBO', 'Ret']) sibling = None def compute(self, EV): try: self.val = Branching_Overhead(self, EV, 0) self.thresh = (self.val > 5) except ZeroDivisionError: handle_error_metric(self, "Branching_Overhead zero division") desc = """ Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. 
(A lower bound). Consider Loop Unrolling or function inlining optimizations""" class Metric_Useful_Work: name = "Useful_Work" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvUW', 'Ret']) sibling = None def compute(self, EV): try: self.val = Useful_Work(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Useful_Work zero division") desc = """ Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.""" class Metric_Core_Bound_Likely: name = "Core_Bound_Likely" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Botlnk.L0" metricgroup = frozenset(['Cor', 'SMT']) sibling = None def compute(self, EV): try: self.val = Core_Bound_Likely(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Core_Bound_Likely zero division") desc = """ Probability of Core Bound bottleneck hidden by SMT-profiling artifacts. 
Tip: consider analysis with SMT disabled""" class Metric_IPC: name = "IPC" domain = "Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Ret', 'Summary']) sibling = None def compute(self, EV): try: self.val = IPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IPC zero division") desc = """ Instructions Per Cycle (per Logical Processor)""" class Metric_UopPI: name = "UopPI" domain = "Metric" maxval = 2.0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = UopPI(self, EV, 0) self.thresh = (self.val > 1.05) except ZeroDivisionError: handle_error_metric(self, "UopPI zero division") desc = """ Uops Per Instruction""" class Metric_UpTB: name = "UpTB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = UpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 1.5 except ZeroDivisionError: handle_error_metric(self, "UpTB zero division") desc = """ Uops per taken branch""" class Metric_CPI: name = "CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Mem']) sibling = None def compute(self, EV): try: self.val = CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPI zero division") desc = """ Cycles Per Instruction (per Logical Processor)""" class Metric_CLKS: name = "CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline']) sibling = None def compute(self, EV): try: self.val = CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS zero division") desc = """ Per-Logical Processor actual clocks when the Logical Processor is active.""" class Metric_SLOTS: name = "SLOTS" domain = "Count" maxval = 0 errcount = 0 
area = "Info.Thread" metricgroup = frozenset(['TmaL1']) sibling = None def compute(self, EV): try: self.val = SLOTS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SLOTS zero division") desc = """ Total issue-pipeline slots (per-Physical Core till ICL; per- Logical Processor ICL onward)""" class Metric_Execute_per_Issue: name = "Execute_per_Issue" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Cor', 'Pipeline']) sibling = None def compute(self, EV): try: self.val = Execute_per_Issue(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Execute_per_Issue zero division") desc = """ The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.""" class Metric_CoreIPC: name = "CoreIPC" domain = "Core_Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Ret', 'SMT', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = CoreIPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CoreIPC zero division") desc = """ Instructions Per Cycle across hyper-threads (per physical core)""" class Metric_FLOPc: name = "FLOPc" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Ret', 'Flops']) sibling = None def compute(self, EV): try: self.val = FLOPc(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FLOPc zero division") desc = """ Floating Point Operations Per Cycle""" class Metric_FP_Arith_Utilization: name = "FP_Arith_Utilization" domain = "Core_Metric" maxval = 2.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Cor', 'Flops', 'HPC']) sibling = None def compute(self, EV): try: self.val = FP_Arith_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FP_Arith_Utilization 
zero division") desc = """ Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to Fused-Multiply Add FMA counting - common.""" class Metric_ILP: name = "ILP" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Core" metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil']) sibling = None def compute(self, EV): try: self.val = ILP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ILP zero division") desc = """ Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical- processor)""" class Metric_EPC: name = "EPC" domain = "Metric" maxval = 20.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = EPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "EPC zero division") desc = """ uops Executed per Cycle""" class Metric_CORE_CLKS: name = "CORE_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = CORE_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CORE_CLKS zero division") desc = """ Core actual clocks when any Logical Processor is active on the Physical Core""" class Metric_IpLoad: name = "IpLoad" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpLoad(self, EV, 0) self.thresh = (self.val < 3) except ZeroDivisionError: handle_error_metric(self, "IpLoad zero division") desc = """ Instructions per Load (lower number means higher occurrence rate). 
Tip: reduce memory accesses.""" class Metric_IpStore: name = "IpStore" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpStore(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpStore zero division") desc = """ Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses.""" class Metric_IpBranch: name = "IpBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpBranch(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpBranch zero division") desc = """ Instructions per Branch (lower number means higher occurrence rate)""" class Metric_IpCall: name = "IpCall" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpCall(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpCall zero division") desc = """ Instructions per (near) call (lower number means higher occurrence rate)""" class Metric_IpTB: name = "IpTB" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 2 + 1 except ZeroDivisionError: handle_error_metric(self, "IpTB zero division") desc = """ Instructions per taken branch""" class Metric_BpTkBranch: name = "BpTkBranch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = BpTkBranch(self, EV, 0) self.thresh = 
True except ZeroDivisionError: handle_error_metric(self, "BpTkBranch zero division") desc = """ Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes.""" class Metric_IpFLOP: name = "IpFLOP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpFLOP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpFLOP zero division") desc = """ Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408""" class Metric_IpArith: name = "IpArith" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith zero division") desc = """ Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.""" class Metric_IpArith_Scalar_SP: name = "IpArith_Scalar_SP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpScalar', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_Scalar_SP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_Scalar_SP zero division") desc = """ Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). 
Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_Scalar_DP: name = "IpArith_Scalar_DP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpScalar', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_Scalar_DP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_Scalar_DP zero division") desc = """ Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_AVX128: name = "IpArith_AVX128" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX128(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX128 zero division") desc = """ Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_AVX256: name = "IpArith_AVX256" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX256(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX256 zero division") desc = """ Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). 
Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_AVX512: name = "IpArith_AVX512" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX512(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX512 zero division") desc = """ Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpSWPF: name = "IpSWPF" domain = "Inst_Metric" maxval = 1000 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Prefetches']) sibling = None def compute(self, EV): try: self.val = IpSWPF(self, EV, 0) self.thresh = (self.val < 100) except ZeroDivisionError: handle_error_metric(self, "IpSWPF zero division") desc = """ Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)""" class Metric_Instructions: name = "Instructions" domain = "Count" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Summary', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = Instructions(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Instructions zero division") desc = """ Total number of retired Instructions""" class Metric_Retire: name = "Retire" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Pipeline', 'Ret']) sibling = None def compute(self, EV): try: self.val = Retire(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Retire zero division") desc = """ Average number of Uops retired in cycles where at least one uop has retired.""" class Metric_IpAssist: name = "IpAssist" domain = "Inst_Metric" maxval = 0 errcount = 0 area = 
"Info.Pipeline" metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = IpAssist(self, EV, 0) self.thresh = (self.val < 100000) except ZeroDivisionError: handle_error_metric(self, "IpAssist zero division") desc = """ Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)""" class Metric_Execute: name = "Execute" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT']) sibling = None def compute(self, EV): try: self.val = Execute(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Execute zero division") desc = """ """ class Metric_Fetch_DSB: name = "Fetch_DSB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_DSB(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_DSB zero division") desc = """ Average number of uops fetched from DSB per cycle""" class Metric_Fetch_MITE: name = "Fetch_MITE" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_MITE(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_MITE zero division") desc = """ Average number of uops fetched from MITE per cycle""" class Metric_Fetch_UpC: name = "Fetch_UpC" domain = "Metric" maxval = 6.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_UpC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_UpC zero division") desc = """ Average number of Uops issued by front-end when it issued something""" class Metric_DSB_Coverage: 
name = "DSB_Coverage" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSB', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Coverage(self, EV, 0) self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1) except ZeroDivisionError: handle_error_metric(self, "DSB_Coverage zero division") desc = """ Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture- and-technology/64-ia-32-architectures-optimization- manual.html""" class Metric_DSB_Switch_Cost: name = "DSB_Switch_Cost" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss']) sibling = None def compute(self, EV): try: self.val = DSB_Switch_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DSB_Switch_Cost zero division") desc = """ Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.""" class Metric_DSB_Misses: name = "DSB_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = DSB_Misses(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "DSB_Misses zero division") desc = """ Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.""" class Metric_DSB_Bandwidth: name = "DSB_Bandwidth" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['DSB', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Bandwidth(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "DSB_Bandwidth zero division") desc = """ Total pipeline cost of DSB (uop cache) hits - subset of the 
Instruction_Fetch_BW Bottleneck.""" class Metric_ICache_Miss_Latency: name = "ICache_Miss_Latency" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = ICache_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ICache_Miss_Latency zero division") desc = """ Average Latency for L1 instruction cache misses""" class Metric_IC_Misses: name = "IC_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = IC_Misses(self, EV, 0) self.thresh = (self.val > 5) except ZeroDivisionError: handle_error_metric(self, "IC_Misses zero division") desc = """ Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.""" class Metric_IpDSB_Miss_Ret: name = "IpDSB_Miss_Ret" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = IpDSB_Miss_Ret(self, EV, 0) self.thresh = (self.val < 50) except ZeroDivisionError: handle_error_metric(self, "IpDSB_Miss_Ret zero division") desc = """ Instructions per non-speculative DSB miss (lower number means higher occurrence rate)""" class Metric_IpUnknown_Branch: name = "IpUnknown_Branch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed']) sibling = None def compute(self, EV): try: self.val = IpUnknown_Branch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpUnknown_Branch zero division") desc = """ Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)""" class Metric_L2MPKI_Code: name = "L2MPKI_Code" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = 
frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code zero division") desc = """ L2 cache true code cacheline misses per kilo instruction""" class Metric_L2MPKI_Code_All: name = "L2MPKI_Code_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code_All zero division") desc = """ L2 cache speculative code cacheline misses per kilo instruction""" class Metric_IpMispredict: name = "IpMispredict" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)""" class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = (self.val < 1000) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc = """ Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).""" class Metric_Branch_Misprediction_Cost: name = "Branch_Misprediction_Cost" domain = "Core_Metric" maxval = 300 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Branch_Misprediction_Cost(self, EV, 0) 
self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Misprediction_Cost zero division") desc = """ Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)""" class Metric_Spec_Clears_Ratio: name = "Spec_Clears_Ratio" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Spec_Clears_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Spec_Clears_Ratio zero division") desc = """ Speculative to Retired ratio of all clears (covering Mispredicts and nukes)""" class Metric_Cond_NT: name = "Cond_NT" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_NT(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cond_NT zero division") desc = """ Fraction of branches that are non-taken conditionals""" class Metric_Cond_TK: name = "Cond_TK" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_TK(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cond_TK zero division") desc = """ Fraction of branches that are taken conditionals""" class Metric_CallRet: name = "CallRet" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = CallRet(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CallRet zero division") desc = """ Fraction of branches that are CALL or RET""" class Metric_Jump: name = "Jump" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup 
= frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = Jump(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Jump zero division") desc = """ Fraction of branches that are unconditional (direct or indirect) jumps""" class Metric_Load_Miss_Real_Latency: name = "Load_Miss_Real_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat']) sibling = None def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Miss_Real_Latency zero division") desc = """ Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)""" class Metric_MLP: name = "MLP" domain = "Metric" maxval = 10.0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MLP zero division") desc = """ Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)""" class Metric_L1MPKI: name = "L1MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI zero division") desc = """ L1 cache true misses per kilo instruction for retired demand loads""" class Metric_L1MPKI_Load: name = "L1MPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI_Load zero division") desc = """ L1 cache true misses per kilo instruction for all demand loads (including speculative)""" class Metric_L2MPKI: name = "L2MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'Backend', 'CacheHits']) sibling = None def compute(self, EV): try: self.val = L2MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI zero division") desc = """ L2 cache true misses per kilo instruction for retired demand loads""" class Metric_L2MPKI_All: name = "L2MPKI_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_All zero division") desc = """ L2 cache misses per kilo instruction for all request types (including speculative)""" class Metric_L2MPKI_Load: name = "L2MPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: 
handle_error_metric(self, "L2MPKI_Load zero division") desc = """ L2 cache misses per kilo instruction for all demand loads (including speculative)""" class Metric_L2MPKI_RFO: name = "L2MPKI_RFO" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheMisses', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_RFO(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_RFO zero division") desc = """ Offcore requests (L2 cache miss) per kilo instruction for demand RFOs""" class Metric_L2HPKI_All: name = "L2HPKI_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2HPKI_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2HPKI_All zero division") desc = """ L2 cache hits per kilo instruction for all request types (including speculative)""" class Metric_L2HPKI_Load: name = "L2HPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2HPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2HPKI_Load zero division") desc = """ L2 cache hits per kilo instruction for all demand loads (including speculative)""" class Metric_L3MPKI: name = "L3MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = L3MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3MPKI zero division") desc = """ L3 cache true misses per kilo instruction for retired demand loads""" class Metric_FB_HPKI: name = "FB_HPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): 
try: self.val = FB_HPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FB_HPKI zero division") desc = """ Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss- handling entries)""" class Metric_L1D_Cache_Fill_BW: name = "L1D_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW zero division") desc = """ """ class Metric_L2_Cache_Fill_BW: name = "L2_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Fill_BW: name = "L3_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Access_BW: name = "L3_Cache_Access_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW zero division") desc = """ """ class Metric_Page_Walks_Utilization: name = "Page_Walks_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, 
EV): try: self.val = Page_Walks_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Page_Walks_Utilization zero division") desc = """ Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses""" class Metric_Code_STLB_MPKI: name = "Code_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Fed', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Code_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Code_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Load_STLB_MPKI: name = "Load_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Load_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Store_STLB_MPKI: name = "Store_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Store_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Store_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_L1D_Cache_Fill_BW_2T: name = "L1D_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: 
self.val = L1D_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L1 data cache [GB / sec]""" class Metric_L2_Cache_Fill_BW_2T: name = "L2_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L2 cache [GB / sec]""" class Metric_L3_Cache_Fill_BW_2T: name = "L3_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L3 cache [GB / sec]""" class Metric_L3_Cache_Access_BW_2T: name = "L3_Cache_Access_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW_2T zero division") desc = """ Average per-core data access bandwidth to the L3 cache [GB / sec]""" class Metric_L2_Evictions_Silent_PKI: name = "L2_Evictions_Silent_PKI" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['L2Evicts', 'Mem', 'Server']) sibling = None def compute(self, EV): try: self.val = L2_Evictions_Silent_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, 
"L2_Evictions_Silent_PKI zero division") desc = """ Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)""" class Metric_L2_Evictions_NonSilent_PKI: name = "L2_Evictions_NonSilent_PKI" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['L2Evicts', 'Mem', 'Server']) sibling = None def compute(self, EV): try: self.val = L2_Evictions_NonSilent_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Evictions_NonSilent_PKI zero division") desc = """ Rate of non silent evictions from the L2 cache per Kilo instruction""" class Metric_Load_L2_Miss_Latency: name = "Load_L2_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_Miss_Latency zero division") desc = """ Average Latency for L2 cache miss demand Loads""" class Metric_Load_L2_MLP: name = "Load_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_MLP zero division") desc = """ Average Parallel L2 cache miss demand Loads""" class Metric_Data_L2_MLP: name = "Data_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Data_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Data_L2_MLP zero division") desc = """ Average Parallel L2 cache miss data reads""" class Metric_UC_Load_PKI: name = "UC_Load_PKI" 
domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = UC_Load_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "UC_Load_PKI zero division") desc = """ Un-cacheable retired load per kilo instruction""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "Metric" maxval = 1 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'Summary']) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization (percentage)""" class Metric_CPUs_Utilized: name = "CPUs_Utilized" domain = "Metric" maxval = 300 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = CPUs_Utilized(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPUs_Utilized zero division") desc = """ Average number of utilized CPUs""" class Metric_Core_Frequency: name = "Core_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary', 'Power']) sibling = None def compute(self, EV): try: self.val = Core_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Core_Frequency zero division") desc = """ Measured Average Core Frequency for unhalted processors [GHz]""" class Metric_Uncore_Frequency: name = "Uncore_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Uncore_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Uncore_Frequency zero division") desc = """ Measured Average Uncore Frequency for the SoC [GHz]""" class Metric_GFLOPs: name = 
"GFLOPs" domain = "Metric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Cor', 'Flops', 'HPC']) sibling = None def compute(self, EV): try: self.val = GFLOPs(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "GFLOPs zero division") desc = """ Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_Power_License0_Utilization: name = "Power_License0_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Power_License0_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Power_License0_Utilization zero division") desc = """ Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.""" class Metric_Power_License1_Utilization: name = "Power_License1_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Power_License1_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Power_License1_Utilization zero division") desc = """ Fraction of Core cycles where the core was running with power-delivery for license level 1. 
This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.""" class Metric_Power_License2_Utilization: name = "Power_License2_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Power_License2_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Power_License2_Utilization zero division") desc = """ Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions.""" class Metric_SMT_2T_Utilization: name = "SMT_2T_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = SMT_2T_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SMT_2T_Utilization zero division") desc = """ Fraction of cycles where both hardware Logical Processors were active""" class Metric_Kernel_Utilization: name = "Kernel_Utilization" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in the Operating System (OS) Kernel mode""" class Metric_Kernel_CPI: name = "Kernel_CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Kernel_CPI zero division") desc = """ Cycles Per Instruction for the Operating System (OS) Kernel mode""" class Metric_DRAM_BW_Use: name = 
"DRAM_BW_Use" domain = "GB/sec" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = DRAM_BW_Use(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DRAM_BW_Use zero division") desc = """ Average external Memory Bandwidth Use for reads and writes [GB / sec]""" class Metric_MEM_Read_Latency: name = "MEM_Read_Latency" domain = "NanoSeconds" maxval = 1000 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryLat', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Read_Latency zero division") desc = """ Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches.""" class Metric_MEM_Parallel_Reads: name = "MEM_Parallel_Reads" domain = "SystemMetric" maxval = 100 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Parallel_Reads(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Parallel_Reads zero division") desc = """ Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches""" class Metric_MEM_DRAM_Read_Latency: name = "MEM_DRAM_Read_Latency" domain = "NanoSeconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['MemOffcore', 'MemoryLat', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = MEM_DRAM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_DRAM_Read_Latency zero division") desc = """ Average latency of data read request to external DRAM memory [in nanoseconds]. 
Accounts for demand loads and L1/L2 data- read prefetches""" class Metric_IO_Read_BW: name = "IO_Read_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['IoBW', 'MemOffcore', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = IO_Read_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IO_Read_BW zero division") desc = """ Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU""" class Metric_IO_Write_BW: name = "IO_Write_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['IoBW', 'MemOffcore', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = IO_Write_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IO_Write_BW zero division") desc = """ Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. 
Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU""" class Metric_Time: name = "Time" domain = "Seconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = Time(self, EV, 0) self.thresh = (self.val < 1) except ZeroDivisionError: handle_error_metric(self, "Time zero division") desc = """ Run duration time in seconds""" class Metric_Socket_CLKS: name = "Socket_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Socket_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Socket_CLKS zero division") desc = """ Socket actual clocks when any core is active on that socket""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Branches', 'OS']) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) self.thresh = (self.val < 1000000) except ZeroDivisionError: handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]""" # Schedule class Setup: def __init__(self, r): o = dict() n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n n = LCP() ; 
r.run(n) ; o["LCP"] = n n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n n = MITE() ; r.run(n) ; o["MITE"] = n n = Decoder0_Alone() ; r.run(n) ; o["Decoder0_Alone"] = n n = DSB() ; r.run(n) ; o["DSB"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Other_Mispredicts() ; r.run(n) ; o["Other_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Other_Nukes() ; r.run(n) ; o["Other_Nukes"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n n = Load_STLB_Hit() ; r.run(n) ; o["Load_STLB_Hit"] = n n = Load_STLB_Miss() ; r.run(n) ; o["Load_STLB_Miss"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = L1_Hit_Latency() ; r.run(n) ; o["L1_Hit_Latency"] = n n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n n = FB_Full() ; r.run(n) ; o["FB_Full"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n n = Local_MEM() ; r.run(n) ; o["Local_MEM"] = n n = Remote_MEM() ; r.run(n) ; o["Remote_MEM"] = n n = Remote_Cache() ; r.run(n) ; o["Remote_Cache"] = n n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n n = 
Split_Stores() ; r.run(n) ; o["Split_Stores"] = n n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n n = Store_STLB_Hit() ; r.run(n) ; o["Store_STLB_Hit"] = n n = Store_STLB_Miss() ; r.run(n) ; o["Store_STLB_Miss"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Divider() ; r.run(n) ; o["Divider"] = n n = Serializing_Operation() ; r.run(n) ; o["Serializing_Operation"] = n n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n n = Mixing_Vectors() ; r.run(n) ; o["Mixing_Vectors"] = n n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n n = Port_0() ; r.run(n) ; o["Port_0"] = n n = Port_1() ; r.run(n) ; o["Port_1"] = n n = Port_5() ; r.run(n) ; o["Port_5"] = n n = Port_6() ; r.run(n) ; o["Port_6"] = n n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n n = Port_2() ; r.run(n) ; o["Port_2"] = n n = Port_3() ; r.run(n) ; o["Port_3"] = n n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n n = Port_4() ; r.run(n) ; o["Port_4"] = n n = Port_7() ; r.run(n) ; o["Port_7"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n n = X87_Use() ; r.run(n) ; o["X87_Use"] = n n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n n = FP_Vector_512b() ; r.run(n) ; o["FP_Vector_512b"] = n n = Memory_Operations() ; r.run(n) ; o["Memory_Operations"] = n n = Fused_Instructions() ; r.run(n) ; o["Fused_Instructions"] = n n = Non_Fused_Branches() ; r.run(n) ; o["Non_Fused_Branches"] = n n = Other_Light_Ops() ; r.run(n) ; 
o["Other_Light_Ops"] = n n = Nop_Instructions() ; r.run(n) ; o["Nop_Instructions"] = n n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n n = Few_Uops_Instructions() ; r.run(n) ; o["Few_Uops_Instructions"] = n n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n n = Assists() ; r.run(n) ; o["Assists"] = n n = FP_Assists() ; r.run(n) ; o["FP_Assists"] = n n = CISC() ; r.run(n) ; o["CISC"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Resteers"].parent = o["Fetch_Latency"] o["Mispredicts_Resteers"].parent = o["Branch_Resteers"] o["Clears_Resteers"].parent = o["Branch_Resteers"] o["Unknown_Branches"].parent = o["Branch_Resteers"] o["MS_Switches"].parent = o["Fetch_Latency"] o["LCP"].parent = o["Fetch_Latency"] o["DSB_Switches"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["MITE"].parent = o["Fetch_Bandwidth"] o["Decoder0_Alone"].parent = o["MITE"] o["DSB"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Other_Mispredicts"].parent = o["Branch_Mispredicts"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Other_Nukes"].parent = o["Machine_Clears"] o["Memory_Bound"].parent = o["Backend_Bound"] o["L1_Bound"].parent = o["Memory_Bound"] o["DTLB_Load"].parent = o["L1_Bound"] o["Load_STLB_Hit"].parent = o["DTLB_Load"] o["Load_STLB_Miss"].parent = o["DTLB_Load"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["L1_Hit_Latency"].parent = o["L1_Bound"] o["Lock_Latency"].parent = o["L1_Bound"] o["Split_Loads"].parent = o["L1_Bound"] o["G4K_Aliasing"].parent = o["L1_Bound"] o["FB_Full"].parent = o["L1_Bound"] o["L2_Bound"].parent = o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["Contested_Accesses"].parent = o["L3_Bound"] o["Data_Sharing"].parent = o["L3_Bound"] o["L3_Hit_Latency"].parent = o["L3_Bound"] o["SQ_Full"].parent = o["L3_Bound"] 
o["DRAM_Bound"].parent = o["Memory_Bound"] o["MEM_Bandwidth"].parent = o["DRAM_Bound"] o["MEM_Latency"].parent = o["DRAM_Bound"] o["Local_MEM"].parent = o["MEM_Latency"] o["Remote_MEM"].parent = o["MEM_Latency"] o["Remote_Cache"].parent = o["MEM_Latency"] o["Store_Bound"].parent = o["Memory_Bound"] o["Store_Latency"].parent = o["Store_Bound"] o["False_Sharing"].parent = o["Store_Bound"] o["Split_Stores"].parent = o["Store_Bound"] o["DTLB_Store"].parent = o["Store_Bound"] o["Store_STLB_Hit"].parent = o["DTLB_Store"] o["Store_STLB_Miss"].parent = o["DTLB_Store"] o["Core_Bound"].parent = o["Backend_Bound"] o["Divider"].parent = o["Core_Bound"] o["Serializing_Operation"].parent = o["Core_Bound"] o["Ports_Utilization"].parent = o["Core_Bound"] o["Ports_Utilized_0"].parent = o["Ports_Utilization"] o["Mixing_Vectors"].parent = o["Ports_Utilized_0"] o["Ports_Utilized_1"].parent = o["Ports_Utilization"] o["Ports_Utilized_2"].parent = o["Ports_Utilization"] o["Ports_Utilized_3m"].parent = o["Ports_Utilization"] o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_0"].parent = o["ALU_Op_Utilization"] o["Port_1"].parent = o["ALU_Op_Utilization"] o["Port_5"].parent = o["ALU_Op_Utilization"] o["Port_6"].parent = o["ALU_Op_Utilization"] o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_2"].parent = o["Load_Op_Utilization"] o["Port_3"].parent = o["Load_Op_Utilization"] o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_4"].parent = o["Store_Op_Utilization"] o["Port_7"].parent = o["Store_Op_Utilization"] o["Light_Operations"].parent = o["Retiring"] o["FP_Arith"].parent = o["Light_Operations"] o["X87_Use"].parent = o["FP_Arith"] o["FP_Scalar"].parent = o["FP_Arith"] o["FP_Vector"].parent = o["FP_Arith"] o["FP_Vector_128b"].parent = o["FP_Vector"] o["FP_Vector_256b"].parent = o["FP_Vector"] o["FP_Vector_512b"].parent = o["FP_Vector"] o["Memory_Operations"].parent = o["Light_Operations"] o["Fused_Instructions"].parent = o["Light_Operations"] 
o["Non_Fused_Branches"].parent = o["Light_Operations"] o["Other_Light_Ops"].parent = o["Light_Operations"] o["Nop_Instructions"].parent = o["Other_Light_Ops"] o["Heavy_Operations"].parent = o["Retiring"] o["Few_Uops_Instructions"].parent = o["Heavy_Operations"] o["Microcode_Sequencer"].parent = o["Heavy_Operations"] o["Assists"].parent = o["Microcode_Sequencer"] o["FP_Assists"].parent = o["Assists"] o["CISC"].parent = o["Microcode_Sequencer"] # user visible metrics n = Metric_Mispredictions() ; r.metric(n) ; o["Mispredictions"] = n n = Metric_Big_Code() ; r.metric(n) ; o["Big_Code"] = n n = Metric_Instruction_Fetch_BW() ; r.metric(n) ; o["Instruction_Fetch_BW"] = n n = Metric_Cache_Memory_Bandwidth() ; r.metric(n) ; o["Cache_Memory_Bandwidth"] = n n = Metric_Cache_Memory_Latency() ; r.metric(n) ; o["Cache_Memory_Latency"] = n n = Metric_Memory_Data_TLBs() ; r.metric(n) ; o["Memory_Data_TLBs"] = n n = Metric_Memory_Synchronization() ; r.metric(n) ; o["Memory_Synchronization"] = n n = Metric_Compute_Bound_Est() ; r.metric(n) ; o["Compute_Bound_Est"] = n n = Metric_Irregular_Overhead() ; r.metric(n) ; o["Irregular_Overhead"] = n n = Metric_Other_Bottlenecks() ; r.metric(n) ; o["Other_Bottlenecks"] = n n = Metric_Branching_Overhead() ; r.metric(n) ; o["Branching_Overhead"] = n n = Metric_Useful_Work() ; r.metric(n) ; o["Useful_Work"] = n n = Metric_Core_Bound_Likely() ; r.metric(n) ; o["Core_Bound_Likely"] = n n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n n = Metric_ILP() ; 
r.metric(n) ; o["ILP"] = n n = Metric_EPC() ; r.metric(n) ; o["EPC"] = n n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n n = Metric_IpArith_AVX512() ; r.metric(n) ; o["IpArith_AVX512"] = n n = Metric_IpSWPF() ; r.metric(n) ; o["IpSWPF"] = n n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n n = Metric_IpAssist() ; r.metric(n) ; o["IpAssist"] = n n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n n = Metric_Fetch_DSB() ; r.metric(n) ; o["Fetch_DSB"] = n n = Metric_Fetch_MITE() ; r.metric(n) ; o["Fetch_MITE"] = n n = Metric_Fetch_UpC() ; r.metric(n) ; o["Fetch_UpC"] = n n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n n = Metric_DSB_Switch_Cost() ; r.metric(n) ; o["DSB_Switch_Cost"] = n n = Metric_DSB_Misses() ; r.metric(n) ; o["DSB_Misses"] = n n = Metric_DSB_Bandwidth() ; r.metric(n) ; o["DSB_Bandwidth"] = n n = Metric_ICache_Miss_Latency() ; r.metric(n) ; o["ICache_Miss_Latency"] = n n = Metric_IC_Misses() ; r.metric(n) ; o["IC_Misses"] = n n = Metric_IpDSB_Miss_Ret() ; r.metric(n) ; o["IpDSB_Miss_Ret"] = n n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n n = Metric_L2MPKI_Code() ; r.metric(n) ; o["L2MPKI_Code"] = n n = Metric_L2MPKI_Code_All() ; 
r.metric(n) ; o["L2MPKI_Code_All"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_Branch_Misprediction_Cost() ; r.metric(n) ; o["Branch_Misprediction_Cost"] = n n = Metric_Spec_Clears_Ratio() ; r.metric(n) ; o["Spec_Clears_Ratio"] = n n = Metric_Cond_NT() ; r.metric(n) ; o["Cond_NT"] = n n = Metric_Cond_TK() ; r.metric(n) ; o["Cond_TK"] = n n = Metric_CallRet() ; r.metric(n) ; o["CallRet"] = n n = Metric_Jump() ; r.metric(n) ; o["Jump"] = n n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n n = Metric_L1MPKI_Load() ; r.metric(n) ; o["L1MPKI_Load"] = n n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n n = Metric_L2HPKI_All() ; r.metric(n) ; o["L2HPKI_All"] = n n = Metric_L2HPKI_Load() ; r.metric(n) ; o["L2HPKI_Load"] = n n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n n = Metric_FB_HPKI() ; r.metric(n) ; o["FB_HPKI"] = n n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n n = Metric_L3_Cache_Access_BW() ; r.metric(n) ; o["L3_Cache_Access_BW"] = n n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n n = Metric_Code_STLB_MPKI() ; r.metric(n) ; o["Code_STLB_MPKI"] = n n = Metric_Load_STLB_MPKI() ; r.metric(n) ; o["Load_STLB_MPKI"] = n n = Metric_Store_STLB_MPKI() ; r.metric(n) ; o["Store_STLB_MPKI"] = n n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n n = 
Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Access_BW_2T() ; r.metric(n) ; o["L3_Cache_Access_BW_2T"] = n n = Metric_L2_Evictions_Silent_PKI() ; r.metric(n) ; o["L2_Evictions_Silent_PKI"] = n n = Metric_L2_Evictions_NonSilent_PKI() ; r.metric(n) ; o["L2_Evictions_NonSilent_PKI"] = n n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n n = Metric_UC_Load_PKI() ; r.metric(n) ; o["UC_Load_PKI"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n n = Metric_Uncore_Frequency() ; r.metric(n) ; o["Uncore_Frequency"] = n n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_Power_License0_Utilization() ; r.metric(n) ; o["Power_License0_Utilization"] = n n = Metric_Power_License1_Utilization() ; r.metric(n) ; o["Power_License1_Utilization"] = n n = Metric_Power_License2_Utilization() ; r.metric(n) ; o["Power_License2_Utilization"] = n n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n n = Metric_MEM_DRAM_Read_Latency() ; r.metric(n) ; o["MEM_DRAM_Read_Latency"] = n n = Metric_IO_Read_BW() ; r.metric(n) ; o["IO_Read_BW"] = n n = Metric_IO_Write_BW() ; r.metric(n) ; o["IO_Write_BW"] = n n = Metric_Time() ; r.metric(n) ; o["Time"] = n n = Metric_Socket_CLKS() ; r.metric(n) ; 
o["Socket_CLKS"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n # references between groups o["Branch_Resteers"].Unknown_Branches = o["Unknown_Branches"] o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Other_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Other_Mispredicts"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Nukes"].Machine_Clears = o["Machine_Clears"] o["Other_Nukes"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Nukes"].Bad_Speculation = o["Bad_Speculation"] o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Retiring = o["Retiring"] o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Backend_Bound = o["Backend_Bound"] o["Load_STLB_Hit"].Load_STLB_Miss = o["Load_STLB_Miss"] o["Load_STLB_Hit"].DTLB_Load = o["DTLB_Load"] o["DRAM_Bound"].L2_Bound = o["L2_Bound"] o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Store_STLB_Hit"].DTLB_Store = o["DTLB_Store"] o["Store_STLB_Hit"].Store_STLB_Miss = o["Store_STLB_Miss"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Core_Bound"].Retiring = o["Retiring"] o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Ports_Utilization"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Ports_Utilization"].Retiring = o["Retiring"] o["Retiring"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Retiring = o["Retiring"] o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"] o["FP_Arith"].Retiring = o["Retiring"] o["FP_Arith"].FP_Scalar = o["FP_Scalar"] o["FP_Arith"].X87_Use = o["X87_Use"] o["FP_Arith"].FP_Vector = o["FP_Vector"] o["X87_Use"].Retiring = o["Retiring"] o["Memory_Operations"].Retiring = 
o["Retiring"] o["Memory_Operations"].Light_Operations = o["Light_Operations"] o["Memory_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Fused_Instructions"].Retiring = o["Retiring"] o["Fused_Instructions"].Light_Operations = o["Light_Operations"] o["Fused_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Non_Fused_Branches"].Retiring = o["Retiring"] o["Non_Fused_Branches"].Light_Operations = o["Light_Operations"] o["Non_Fused_Branches"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].Light_Operations = o["Light_Operations"] o["Other_Light_Ops"].Retiring = o["Retiring"] o["Other_Light_Ops"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].Fused_Instructions = o["Fused_Instructions"] o["Other_Light_Ops"].Non_Fused_Branches = o["Non_Fused_Branches"] o["Other_Light_Ops"].FP_Vector = o["FP_Vector"] o["Other_Light_Ops"].FP_Scalar = o["FP_Scalar"] o["Other_Light_Ops"].FP_Arith = o["FP_Arith"] o["Other_Light_Ops"].X87_Use = o["X87_Use"] o["Other_Light_Ops"].Memory_Operations = o["Memory_Operations"] o["Nop_Instructions"].Retiring = o["Retiring"] o["Nop_Instructions"].Light_Operations = o["Light_Operations"] o["Nop_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Assists = o["Assists"] o["Mispredictions"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Mispredictions"].LCP = o["LCP"] o["Mispredictions"].Other_Mispredicts = o["Other_Mispredicts"] o["Mispredictions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Mispredictions"].DSB_Switches = o["DSB_Switches"] o["Mispredictions"].Branch_Resteers = o["Branch_Resteers"] o["Mispredictions"].ICache_Misses = o["ICache_Misses"] o["Mispredictions"].MS_Switches = o["MS_Switches"] o["Mispredictions"].Bad_Speculation = o["Bad_Speculation"] o["Mispredictions"].ITLB_Misses = 
o["ITLB_Misses"] o["Mispredictions"].Unknown_Branches = o["Unknown_Branches"] o["Mispredictions"].Fetch_Latency = o["Fetch_Latency"] o["Mispredictions"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Big_Code"].LCP = o["LCP"] o["Big_Code"].ICache_Misses = o["ICache_Misses"] o["Big_Code"].DSB_Switches = o["DSB_Switches"] o["Big_Code"].Branch_Resteers = o["Branch_Resteers"] o["Big_Code"].MS_Switches = o["MS_Switches"] o["Big_Code"].ITLB_Misses = o["ITLB_Misses"] o["Big_Code"].Unknown_Branches = o["Unknown_Branches"] o["Big_Code"].Fetch_Latency = o["Fetch_Latency"] o["Instruction_Fetch_BW"].Heavy_Operations = o["Heavy_Operations"] o["Instruction_Fetch_BW"].ICache_Misses = o["ICache_Misses"] o["Instruction_Fetch_BW"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Instruction_Fetch_BW"].Frontend_Bound = o["Frontend_Bound"] o["Instruction_Fetch_BW"].Bad_Speculation = o["Bad_Speculation"] o["Instruction_Fetch_BW"].ITLB_Misses = o["ITLB_Misses"] o["Instruction_Fetch_BW"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Instruction_Fetch_BW"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Instruction_Fetch_BW"].LCP = o["LCP"] o["Instruction_Fetch_BW"].Other_Mispredicts = o["Other_Mispredicts"] o["Instruction_Fetch_BW"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Instruction_Fetch_BW"].DSB_Switches = o["DSB_Switches"] o["Instruction_Fetch_BW"].Assists = o["Assists"] o["Instruction_Fetch_BW"].Branch_Resteers = o["Branch_Resteers"] o["Instruction_Fetch_BW"].Clears_Resteers = o["Clears_Resteers"] o["Instruction_Fetch_BW"].MS_Switches = o["MS_Switches"] o["Instruction_Fetch_BW"].Unknown_Branches = o["Unknown_Branches"] o["Instruction_Fetch_BW"].Fetch_Latency = o["Fetch_Latency"] o["Cache_Memory_Bandwidth"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Bandwidth"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Bandwidth"].G4K_Aliasing = o["G4K_Aliasing"] o["Cache_Memory_Bandwidth"].Retiring = o["Retiring"] o["Cache_Memory_Bandwidth"].Frontend_Bound = 
o["Frontend_Bound"] o["Cache_Memory_Bandwidth"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Bandwidth"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Bandwidth"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Bandwidth"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Bandwidth"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Bandwidth"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Bandwidth"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Bandwidth"].FB_Full = o["FB_Full"] o["Cache_Memory_Bandwidth"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Bandwidth"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Cache_Memory_Bandwidth"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Bandwidth"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Bandwidth"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Bandwidth"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Bandwidth"].Backend_Bound = o["Backend_Bound"] o["Cache_Memory_Bandwidth"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Bandwidth"].DRAM_Bound = o["DRAM_Bound"] o["Cache_Memory_Latency"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Latency"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Latency"].False_Sharing = o["False_Sharing"] o["Cache_Memory_Latency"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Latency"].Retiring = o["Retiring"] o["Cache_Memory_Latency"].Frontend_Bound = o["Frontend_Bound"] o["Cache_Memory_Latency"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Latency"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Latency"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Latency"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Latency"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Latency"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Latency"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Latency"].FB_Full = o["FB_Full"] o["Cache_Memory_Latency"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Latency"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Cache_Memory_Latency"].DTLB_Store = o["DTLB_Store"] 
o["Cache_Memory_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Latency"].Store_Latency = o["Store_Latency"] o["Cache_Memory_Latency"].Split_Stores = o["Split_Stores"] o["Cache_Memory_Latency"].G4K_Aliasing = o["G4K_Aliasing"] o["Cache_Memory_Latency"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Latency"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Latency"].Backend_Bound = o["Backend_Bound"] o["Cache_Memory_Latency"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Latency"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Data_TLBs"].L1_Bound = o["L1_Bound"] o["Memory_Data_TLBs"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Memory_Data_TLBs"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Memory_Data_TLBs"].DTLB_Load = o["DTLB_Load"] o["Memory_Data_TLBs"].Store_Latency = o["Store_Latency"] o["Memory_Data_TLBs"].G4K_Aliasing = o["G4K_Aliasing"] o["Memory_Data_TLBs"].Retiring = o["Retiring"] o["Memory_Data_TLBs"].Split_Stores = o["Split_Stores"] o["Memory_Data_TLBs"].False_Sharing = o["False_Sharing"] o["Memory_Data_TLBs"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Data_TLBs"].DTLB_Store = o["DTLB_Store"] o["Memory_Data_TLBs"].L2_Bound = o["L2_Bound"] o["Memory_Data_TLBs"].Memory_Bound = o["Memory_Bound"] o["Memory_Data_TLBs"].Lock_Latency = o["Lock_Latency"] o["Memory_Data_TLBs"].Backend_Bound = o["Backend_Bound"] o["Memory_Data_TLBs"].Store_Bound = o["Store_Bound"] o["Memory_Data_TLBs"].Split_Loads = o["Split_Loads"] o["Memory_Data_TLBs"].L3_Bound = o["L3_Bound"] o["Memory_Data_TLBs"].FB_Full = o["FB_Full"] o["Memory_Data_TLBs"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Synchronization"].L1_Bound = o["L1_Bound"] o["Memory_Synchronization"].Local_MEM = o["Local_MEM"] o["Memory_Synchronization"].Retiring = o["Retiring"] o["Memory_Synchronization"].Data_Sharing = o["Data_Sharing"] o["Memory_Synchronization"].L2_Bound = o["L2_Bound"] o["Memory_Synchronization"].Contested_Accesses = o["Contested_Accesses"] o["Memory_Synchronization"].Bad_Speculation = 
o["Bad_Speculation"] o["Memory_Synchronization"].Machine_Clears = o["Machine_Clears"] o["Memory_Synchronization"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Memory_Synchronization"].Store_Latency = o["Store_Latency"] o["Memory_Synchronization"].Backend_Bound = o["Backend_Bound"] o["Memory_Synchronization"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Memory_Synchronization"].Remote_Cache = o["Remote_Cache"] o["Memory_Synchronization"].Remote_MEM = o["Remote_MEM"] o["Memory_Synchronization"].False_Sharing = o["False_Sharing"] o["Memory_Synchronization"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Synchronization"].Memory_Bound = o["Memory_Bound"] o["Memory_Synchronization"].SQ_Full = o["SQ_Full"] o["Memory_Synchronization"].Store_Bound = o["Store_Bound"] o["Memory_Synchronization"].L3_Bound = o["L3_Bound"] o["Memory_Synchronization"].DTLB_Store = o["DTLB_Store"] o["Memory_Synchronization"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Memory_Synchronization"].Split_Stores = o["Split_Stores"] o["Memory_Synchronization"].MEM_Latency = o["MEM_Latency"] o["Memory_Synchronization"].Other_Nukes = o["Other_Nukes"] o["Memory_Synchronization"].DRAM_Bound = o["DRAM_Bound"] o["Compute_Bound_Est"].Serializing_Operation = o["Serializing_Operation"] o["Compute_Bound_Est"].Ports_Utilization = o["Ports_Utilization"] o["Compute_Bound_Est"].Retiring = o["Retiring"] o["Compute_Bound_Est"].Frontend_Bound = o["Frontend_Bound"] o["Compute_Bound_Est"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Compute_Bound_Est"].Memory_Bound = o["Memory_Bound"] o["Compute_Bound_Est"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Compute_Bound_Est"].Core_Bound = o["Core_Bound"] o["Compute_Bound_Est"].Backend_Bound = o["Backend_Bound"] o["Compute_Bound_Est"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Compute_Bound_Est"].Divider = o["Divider"] o["Compute_Bound_Est"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Heavy_Operations = o["Heavy_Operations"] 
o["Irregular_Overhead"].Ports_Utilization = o["Ports_Utilization"] o["Irregular_Overhead"].Retiring = o["Retiring"] o["Irregular_Overhead"].ICache_Misses = o["ICache_Misses"] o["Irregular_Overhead"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Irregular_Overhead"].Frontend_Bound = o["Frontend_Bound"] o["Irregular_Overhead"].Serializing_Operation = o["Serializing_Operation"] o["Irregular_Overhead"].Core_Bound = o["Core_Bound"] o["Irregular_Overhead"].Bad_Speculation = o["Bad_Speculation"] o["Irregular_Overhead"].ITLB_Misses = o["ITLB_Misses"] o["Irregular_Overhead"].Divider = o["Divider"] o["Irregular_Overhead"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Irregular_Overhead"].Memory_Bound = o["Memory_Bound"] o["Irregular_Overhead"].Machine_Clears = o["Machine_Clears"] o["Irregular_Overhead"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Irregular_Overhead"].LCP = o["LCP"] o["Irregular_Overhead"].Other_Mispredicts = o["Other_Mispredicts"] o["Irregular_Overhead"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Irregular_Overhead"].DSB_Switches = o["DSB_Switches"] o["Irregular_Overhead"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Assists = o["Assists"] o["Irregular_Overhead"].Backend_Bound = o["Backend_Bound"] o["Irregular_Overhead"].Branch_Resteers = o["Branch_Resteers"] o["Irregular_Overhead"].Clears_Resteers = o["Clears_Resteers"] o["Irregular_Overhead"].MS_Switches = o["MS_Switches"] o["Irregular_Overhead"].Other_Nukes = o["Other_Nukes"] o["Irregular_Overhead"].Unknown_Branches = o["Unknown_Branches"] o["Irregular_Overhead"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].Local_MEM = o["Local_MEM"] o["Other_Bottlenecks"].Retiring = o["Retiring"] o["Other_Bottlenecks"].Data_Sharing = o["Data_Sharing"] o["Other_Bottlenecks"].L2_Bound = o["L2_Bound"] o["Other_Bottlenecks"].Contested_Accesses = o["Contested_Accesses"] o["Other_Bottlenecks"].L3_Bound = o["L3_Bound"] o["Other_Bottlenecks"].Machine_Clears = 
o["Machine_Clears"] o["Other_Bottlenecks"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Other_Bottlenecks"].Store_Latency = o["Store_Latency"] o["Other_Bottlenecks"].Other_Mispredicts = o["Other_Mispredicts"] o["Other_Bottlenecks"].DSB_Switches = o["DSB_Switches"] o["Other_Bottlenecks"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Other_Bottlenecks"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Other_Bottlenecks"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Other_Bottlenecks"].DTLB_Load = o["DTLB_Load"] o["Other_Bottlenecks"].ICache_Misses = o["ICache_Misses"] o["Other_Bottlenecks"].Memory_Bound = o["Memory_Bound"] o["Other_Bottlenecks"].SQ_Full = o["SQ_Full"] o["Other_Bottlenecks"].Store_Bound = o["Store_Bound"] o["Other_Bottlenecks"].Bad_Speculation = o["Bad_Speculation"] o["Other_Bottlenecks"].FB_Full = o["FB_Full"] o["Other_Bottlenecks"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Other_Bottlenecks"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Other_Bottlenecks"].Split_Stores = o["Split_Stores"] o["Other_Bottlenecks"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Other_Bottlenecks"].Other_Nukes = o["Other_Nukes"] o["Other_Bottlenecks"].Unknown_Branches = o["Unknown_Branches"] o["Other_Bottlenecks"].DRAM_Bound = o["DRAM_Bound"] o["Other_Bottlenecks"].L1_Bound = o["L1_Bound"] o["Other_Bottlenecks"].G4K_Aliasing = o["G4K_Aliasing"] o["Other_Bottlenecks"].Core_Bound = o["Core_Bound"] o["Other_Bottlenecks"].Divider = o["Divider"] o["Other_Bottlenecks"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Other_Bottlenecks"].Assists = o["Assists"] o["Other_Bottlenecks"].Backend_Bound = o["Backend_Bound"] o["Other_Bottlenecks"].Branch_Resteers = o["Branch_Resteers"] o["Other_Bottlenecks"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Other_Bottlenecks"].Remote_Cache = o["Remote_Cache"] o["Other_Bottlenecks"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Other_Bottlenecks"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].Remote_MEM = o["Remote_MEM"] 
o["Other_Bottlenecks"].Ports_Utilization = o["Ports_Utilization"] o["Other_Bottlenecks"].False_Sharing = o["False_Sharing"] o["Other_Bottlenecks"].Heavy_Operations = o["Heavy_Operations"] o["Other_Bottlenecks"].Frontend_Bound = o["Frontend_Bound"] o["Other_Bottlenecks"].Serializing_Operation = o["Serializing_Operation"] o["Other_Bottlenecks"].MEM_Latency = o["MEM_Latency"] o["Other_Bottlenecks"].Split_Loads = o["Split_Loads"] o["Other_Bottlenecks"].ITLB_Misses = o["ITLB_Misses"] o["Other_Bottlenecks"].DTLB_Store = o["DTLB_Store"] o["Other_Bottlenecks"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Bottlenecks"].LCP = o["LCP"] o["Other_Bottlenecks"].Lock_Latency = o["Lock_Latency"] o["Other_Bottlenecks"].Clears_Resteers = o["Clears_Resteers"] o["Other_Bottlenecks"].MS_Switches = o["MS_Switches"] o["Other_Bottlenecks"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Useful_Work"].Retiring = o["Retiring"] o["Useful_Work"].Heavy_Operations = o["Heavy_Operations"] o["Useful_Work"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Useful_Work"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Useful_Work"].Assists = o["Assists"] o["Core_Bound_Likely"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Core_Bound_Likely"].Ports_Utilization = o["Ports_Utilization"] o["Core_Bound_Likely"].Retiring = o["Retiring"] o["Core_Bound_Likely"].Frontend_Bound = o["Frontend_Bound"] o["Core_Bound_Likely"].Memory_Bound = o["Memory_Bound"] o["Core_Bound_Likely"].Core_Bound = o["Core_Bound"] o["Core_Bound_Likely"].Backend_Bound = o["Backend_Bound"] o["DSB_Misses"].MITE = o["MITE"] o["DSB_Misses"].LCP = o["LCP"] o["DSB_Misses"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Misses"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Misses"].DSB_Switches = o["DSB_Switches"] o["DSB_Misses"].Branch_Resteers = o["Branch_Resteers"] o["DSB_Misses"].ICache_Misses = o["ICache_Misses"] o["DSB_Misses"].MS_Switches = o["MS_Switches"] o["DSB_Misses"].ITLB_Misses = o["ITLB_Misses"] o["DSB_Misses"].DSB = 
o["DSB"] o["DSB_Misses"].Unknown_Branches = o["Unknown_Branches"] o["DSB_Misses"].Fetch_Latency = o["Fetch_Latency"] o["DSB_Bandwidth"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Bandwidth"].DSB = o["DSB"] o["DSB_Bandwidth"].MITE = o["MITE"] o["DSB_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].LCP = o["LCP"] o["IC_Misses"].MS_Switches = o["MS_Switches"] o["IC_Misses"].ICache_Misses = o["ICache_Misses"] o["IC_Misses"].ITLB_Misses = o["ITLB_Misses"] o["IC_Misses"].Unknown_Branches = o["Unknown_Branches"] o["IC_Misses"].DSB_Switches = o["DSB_Switches"] o["IC_Misses"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Branch_Misprediction_Cost"].LCP = o["LCP"] o["Branch_Misprediction_Cost"].Other_Mispredicts = o["Other_Mispredicts"] o["Branch_Misprediction_Cost"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Branch_Misprediction_Cost"].DSB_Switches = o["DSB_Switches"] o["Branch_Misprediction_Cost"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].ICache_Misses = o["ICache_Misses"] o["Branch_Misprediction_Cost"].MS_Switches = o["MS_Switches"] o["Branch_Misprediction_Cost"].Bad_Speculation = o["Bad_Speculation"] o["Branch_Misprediction_Cost"].ITLB_Misses = o["ITLB_Misses"] o["Branch_Misprediction_Cost"].Unknown_Branches = o["Unknown_Branches"] o["Branch_Misprediction_Cost"].Fetch_Latency = o["Fetch_Latency"] o["Branch_Misprediction_Cost"].Mispredicts_Resteers = o["Mispredicts_Resteers"] # siblings cross-tree o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],) o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],) o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Serializing_Operation"], o["Mixing_Vectors"], o["Microcode_Sequencer"],) o["LCP"].sibling = 
(o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],) o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],) o["Decoder0_Alone"].sibling = (o["Few_Uops_Instructions"],) o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],) o["Machine_Clears"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"], o["Microcode_Sequencer"],) o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],) o["DTLB_Load"].sibling = (o["DTLB_Store"],) o["Lock_Latency"].sibling = (o["Store_Latency"],) o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"],) o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"],) o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Remote_Cache"], o["False_Sharing"],) o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],) o["L3_Hit_Latency"].overlap = True o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],) o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],) o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],) o["Remote_Cache"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"],) o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],) o["Store_Latency"].overlap = True o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"],) o["Split_Stores"].sibling = (o["Port_4"],) o["DTLB_Store"].sibling = (o["DTLB_Load"],) o["Serializing_Operation"].sibling = (o["MS_Switches"],) o["Mixing_Vectors"].sibling = (o["MS_Switches"],) o["Ports_Utilized_1"].sibling = (o["L1_Bound"],) o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], 
o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_4"].sibling = (o["Split_Stores"],) o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_512b"],) o["FP_Vector_512b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Few_Uops_Instructions"].sibling = (o["Decoder0_Alone"],) o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],) o["Mispredictions"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) 
o["Cache_Memory_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],) o["Cache_Memory_Latency"].sibling = (o["L3_Hit_Latency"], o["MEM_Latency"],) o["Memory_Data_TLBs"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Memory_Synchronization"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Irregular_Overhead"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],) o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Misses"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["Branch_Misprediction_Cost"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
244,262
Python
.py
5,556
38.026278
2,229
0.657624
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,906
mtl_cmt_ratios.py
andikleen_pmu-tools/mtl_cmt_ratios.py
# -*- coding: latin-1 -*- # # auto generated TopDown/TMA 3.51 description for Intel 14th gen Core (code name Meteorlake) with Crestmont # Please see http://ark.intel.com for more details on these CPUs. # # References: # http://bit.ly/tma-ispass14 # http://halobates.de/blog/p/262 # https://sites.google.com/site/analysismethods/yasin-pubs # https://download.01.org/perfmon/ # https://github.com/andikleen/pmu-tools/wiki/toplev-manual # # Helpers print_error = lambda msg: False version = "3.51" base_frequency = -1.0 Memory = 0 Average_Frequency = 0.0 use_aux = False def handle_error(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 obj.thresh = False def handle_error_metric(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 # Constants # Aux. formulas # pipeline allocation width def Pipeline_Width(self, EV, level): return 6 def MEM_BOUND_STALLS_AT_RET_CORRECTION(self, EV, level): return max((EV("MEM_BOUND_STALLS_LOAD.ALL", level) - EV("LD_HEAD.L1_MISS_AT_RET", level)) / CLKS(self, EV, level) , 0 ) def CLKS(self, EV, level): return EV("CPU_CLK_UNHALTED.CORE", level) def CLKS_P(self, EV, level): return EV("CPU_CLK_UNHALTED.CORE_P", level) def SLOTS(self, EV, level): return Pipeline_Width(self, EV, level) * CLKS(self, EV, level) # Instructions Per Cycle def IPC(self, EV, level): return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level) # Cycles Per Instruction def CPI(self, EV, level): return CLKS(self, EV, level) / EV("INST_RETIRED.ANY", level) # Uops Per Instruction def UPI(self, EV, level): return EV("UOPS_RETIRED.ALL", level) / EV("INST_RETIRED.ANY", level) # Percentage of total non-speculative loads with a store forward or unknown store address block def Store_Fwd_Blocks(self, EV, level): return 100 * EV("LD_BLOCKS.DATA_UNKNOWN", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Percentage of total non-speculative loads with a address aliasing block def Address_Alias_Blocks(self, EV, level): return 100 * EV("LD_BLOCKS.ADDRESS_ALIAS", level) / 
EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Percentage of total non-speculative loads that are splits def Load_Splits(self, EV, level): return 100 * EV("MEM_UOPS_RETIRED.SPLIT_LOADS", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Instructions per Branch (lower number means higher occurrence rate) def IpBranch(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level) # Instruction per (near) call (lower number means higher occurrence rate) def IpCall(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level) # Instructions per Load def IpLoad(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Instructions per Store def IpStore(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level) # Instructions per retired Branch Misprediction def IpMispredict(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) # Instructions per retired conditional Branch Misprediction where the branch was not taken def IpMisp_Cond_Ntaken(self, EV, level): return EV("INST_RETIRED.ANY", level) / (EV("BR_MISP_RETIRED.COND", level) - EV("BR_MISP_RETIRED.COND_TAKEN", level)) # Instructions per retired conditional Branch Misprediction where the branch was taken def IpMisp_Cond_Taken(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_TAKEN", level) # Instructions per retired return Branch Misprediction def IpMisp_Ret(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.RETURN", level) # Instructions per retired indirect call or jump Branch Misprediction def IpMisp_Indirect(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.INDIRECT", level) # Instructions per Far Branch def IpFarBranch(self, EV, level): return EV("INST_RETIRED.ANY", level) / (EV("BR_INST_RETIRED.FAR_BRANCH", level) / 2 ) # Ratio of all 
branches which mispredict def Branch_Mispredict_Ratio(self, EV, level): return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level) # Ratio between Mispredicted branches and unknown branches def Branch_Mispredict_to_Unknown_Branch_Ratio(self, EV, level): return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / EV("BACLEARS.ANY", level) # Percentage of all uops which are ucode ops def Microcode_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.MS", level) / EV("UOPS_RETIRED.ALL", level) # Percentage of all uops which are FPDiv uops def FPDiv_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.FPDIV", level) / EV("UOPS_RETIRED.ALL", level) # Percentage of all uops which are IDiv uops def IDiv_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.IDIV", level) / EV("UOPS_RETIRED.ALL", level) # Percentage of all uops which are x87 uops def X87_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.X87", level) / EV("UOPS_RETIRED.ALL", level) # Average Frequency Utilization relative nominal frequency def Turbo_Utilization(self, EV, level): return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level) # Fraction of cycles spent in Kernel mode def Kernel_Utilization(self, EV, level): return EV("CPU_CLK_UNHALTED.CORE:sup", level) / EV("CPU_CLK_UNHALTED.CORE", level) # Average CPU Utilization def CPU_Utilization(self, EV, level): return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0) # Cycle cost per L2 hit def Cycles_per_Demand_Load_L2_Hit(self, EV, level): return EV("MEM_BOUND_STALLS_LOAD.L2_HIT", level) / EV("MEM_LOAD_UOPS_RETIRED.L2_HIT", level) # Cycle cost per LLC hit def Cycles_per_Demand_Load_L3_Hit(self, EV, level): return EV("MEM_BOUND_STALLS_LOAD.LLC_HIT", level) / EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) # Percent of instruction miss cost that hit in the L2 def Inst_Miss_Cost_L2Hit_Percent(self, EV, level): return 100 * EV("MEM_BOUND_STALLS_IFETCH.L2_HIT", level) / 
(EV("MEM_BOUND_STALLS_IFETCH.ALL", level)) # Percent of instruction miss cost that hit in the L3 def Inst_Miss_Cost_L3Hit_Percent(self, EV, level): return 100 * EV("MEM_BOUND_STALLS_IFETCH.LLC_HIT", level) / (EV("MEM_BOUND_STALLS_IFETCH.ALL", level)) # Percent of instruction miss cost that hit in DRAM def Inst_Miss_Cost_DRAMHit_Percent(self, EV, level): return 100 * EV("MEM_BOUND_STALLS_IFETCH.LLC_MISS", level) / (EV("MEM_BOUND_STALLS_IFETCH.ALL", level)) # load ops retired per 1000 instruction def MemLoadPKI(self, EV, level): return 1000 * EV("MEM_UOPS_RETIRED.ALL_LOADS", level) / EV("INST_RETIRED.ANY", level) # Event groups class Frontend_Bound: name = "Frontend_Bound" domain = "Slots" area = "FE" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.ALL", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Frontend_Bound zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to frontend stalls.""" class Fetch_Latency: name = "Fetch_Latency" domain = "Slots" area = "FE" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.FRONTEND_LATENCY", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Fetch_Latency zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.""" class ICache_Misses: name = "ICache_Misses" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.ICACHE", 3) / SLOTS(self, EV, 3) 
self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "ICache_Misses zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.""" class ITLB_Misses: name = "ITLB_Misses" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.ITLB_MISS", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "ITLB_Misses zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.""" class Branch_Detect: name = "Branch_Detect" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.BRANCH_DETECT", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Branch_Detect zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. 
Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.""" class Branch_Resteer: name = "Branch_Resteer" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.BRANCH_RESTEER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Branch_Resteer zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.""" class Fetch_Bandwidth: name = "Fetch_Bandwidth" domain = "Slots" area = "FE" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Fetch_Bandwidth zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.""" class Cisc: name = "Cisc" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.CISC", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Cisc zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).""" class Decode: name = "Decode" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = 
EV("TOPDOWN_FE_BOUND.DECODE", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Decode zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to decode stalls.""" class Predecode: name = "Predecode" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.PREDECODE", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Predecode zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.""" class Other_FB: name = "Other_FB" domain = "Slots" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.OTHER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Other_FB zero division") return self.val desc = """ Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.""" class Bad_Speculation: name = "Bad_Speculation" domain = "Slots" area = "BAD" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.ALL", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Bad_Speculation zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. 
Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.""" class Branch_Mispredicts: name = "Branch_Mispredicts" domain = "Slots" area = "BAD" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.MISPREDICT", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Branch_Mispredicts zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to branch mispredicts.""" class Machine_Clears: name = "Machine_Clears" domain = "Slots" area = "BAD" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Machine_Clears zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.""" class Nuke: name = "Nuke" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.NUKE", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Nuke zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to a machine clear (slow nuke).""" class SMC: name = "SMC" 
domain = "Count" area = "BAD" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Nuke.compute(EV) * (EV("MACHINE_CLEARS.SMC", 4) / EV("MACHINE_CLEARS.SLOW", 4)) self.thresh = (self.val > 0.02) except ZeroDivisionError: handle_error(self, "SMC zero division") return self.val desc = """ Counts the number of machine clears relative to the number of nuke slots due to SMC.""" class Memory_Ordering: name = "Memory_Ordering" domain = "Count" area = "BAD" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Nuke.compute(EV) * (EV("MACHINE_CLEARS.MEMORY_ORDERING", 4) / EV("MACHINE_CLEARS.SLOW", 4)) self.thresh = (self.val > 0.02) except ZeroDivisionError: handle_error(self, "Memory_Ordering zero division") return self.val desc = """ Counts the number of machine clears relative to the number of nuke slots due to memory ordering.""" class FP_Assist: name = "FP_Assist" domain = "Count" area = "BAD" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Nuke.compute(EV) * (EV("MACHINE_CLEARS.FP_ASSIST", 4) / EV("MACHINE_CLEARS.SLOW", 4)) self.thresh = (self.val > 0.02) except ZeroDivisionError: handle_error(self, "FP_Assist zero division") return self.val desc = """ Counts the number of machine clears relative to the number of nuke slots due to FP assists.""" class Disambiguation: name = "Disambiguation" domain = "Count" area = "BAD" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Nuke.compute(EV) * (EV("MACHINE_CLEARS.DISAMBIGUATION", 4) / EV("MACHINE_CLEARS.SLOW", 4)) self.thresh = (self.val > 0.02) except ZeroDivisionError: handle_error(self, "Disambiguation zero division") return self.val 
desc = """ Counts the number of machine clears relative to the number of nuke slots due to memory disambiguation.""" class Page_Fault: name = "Page_Fault" domain = "Count" area = "BAD" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Nuke.compute(EV) * (EV("MACHINE_CLEARS.PAGE_FAULT", 4) / EV("MACHINE_CLEARS.SLOW", 4)) self.thresh = (self.val > 0.02) except ZeroDivisionError: handle_error(self, "Page_Fault zero division") return self.val desc = """ Counts the number of machine clears relative to the number of nuke slots due to page faults.""" class Fast_Nuke: name = "Fast_Nuke" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.FASTNUKE", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Fast_Nuke zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to a machine clear classified as a fast nuke due to memory ordering, memory disambiguation and memory renaming.""" class Backend_Bound: name = "Backend_Bound" domain = "Slots" area = "BE" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.ALL", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Backend_Bound zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. 
The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound. The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.""" class Core_Bound: name = "Core_Bound" domain = "Cycles" area = "BE" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV)) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Core_Bound zero division") return self.val desc = """ Counts the number of cycles due to backend bound stalls that are core execution bound and not attributed to outstanding demand load or store stalls.""" class Memory_Bound: name = "Memory_Bound" domain = "Cycles" area = "BE" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = min((EV("TOPDOWN_BE_BOUND.ALL", 2) / SLOTS(self, EV, 2)) , (EV("LD_HEAD.ANY_AT_RET", 2) / CLKS(self, EV, 2)) + self.Store_Bound.compute(EV)) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Memory_Bound zero division") return self.val desc = """ Counts the number of cycles the core is stalled due to stores or loads.""" class Store_Bound: name = "Store_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Mem_Scheduler.compute(EV) * (EV("MEM_SCHEDULER_BLOCK.ST_BUF", 3) / EV("MEM_SCHEDULER_BLOCK.ALL", 3)) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Store_Bound zero division") return self.val desc = """ Counts the number of cycles the core is stalled due to store buffer full.""" class L1_Bound: name = "L1_Bound" domain = 
"Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_HEAD.L1_BOUND_AT_RET", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "L1_Bound zero division") return self.val desc = """ Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a load block.""" class Store_Fwd_Blk: name = "Store_Fwd_Blk" domain = "Cycles" area = "BE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_HEAD.ST_ADDR_AT_RET", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Store_Fwd_Blk zero division") return self.val desc = """ Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.""" class STLB_Hit: name = "STLB_Hit" domain = "Cycles" area = "BE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_HEAD.DTLB_MISS_AT_RET", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "STLB_Hit zero division") return self.val desc = """ Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a first level TLB miss.""" class STLB_Miss: name = "STLB_Miss" domain = "Cycles" area = "BE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_HEAD.PGWALK_AT_RET", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "STLB_Miss zero division") return self.val desc = """ Counts the number of cycles that the oldest load of the load buffer is stalled at 
retirement due to a second level TLB miss requiring a page walk.""" class Other_L1: name = "Other_L1" domain = "Cycles" area = "BE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("LD_HEAD.OTHER_AT_RET", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Other_L1 zero division") return self.val desc = """ Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a number of other load blocks.""" class L2_Bound: name = "L2_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("MEM_BOUND_STALLS_LOAD.L2_HIT", 3) / CLKS(self, EV, 3)) - (MEM_BOUND_STALLS_AT_RET_CORRECTION(self, EV, 3) * EV("MEM_BOUND_STALLS_LOAD.L2_HIT", 3) / EV("MEM_BOUND_STALLS_LOAD.ALL", 3)) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "L2_Bound zero division") return self.val desc = """ Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.""" class L3_Bound: name = "L3_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("MEM_BOUND_STALLS_LOAD.LLC_HIT", 3) / CLKS(self, EV, 3)) - (MEM_BOUND_STALLS_AT_RET_CORRECTION(self, EV, 3) * EV("MEM_BOUND_STALLS_LOAD.LLC_HIT", 3) / EV("MEM_BOUND_STALLS_LOAD.ALL", 3)) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "L3_Bound zero division") return self.val desc = """ Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.""" class DRAM_Bound: name = "DRAM_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 
0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("MEM_BOUND_STALLS_LOAD.LLC_MISS", 3) / CLKS(self, EV, 3)) - (MEM_BOUND_STALLS_AT_RET_CORRECTION(self, EV, 3) * EV("MEM_BOUND_STALLS_LOAD.LLC_MISS", 3) / EV("MEM_BOUND_STALLS_LOAD.ALL", 3)) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "DRAM_Bound zero division") return self.val desc = """ Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).""" class Other_Load_Store: name = "Other_Load_Store" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Memory_Bound.compute(EV) - (self.Store_Bound.compute(EV) + self.L1_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Other_Load_Store zero division") return self.val desc = """ Counts the number of cycles the core is stalled due to a demand load miss which hits in the L2, LLC, DRAM or MMIO (Non-DRAM) but could not be correctly attributed or cycles in which the load miss is waiting on a request buffer.""" class Backend_Bound_Aux: name = "Backend_Bound_Aux" domain = "Slots" area = "BE_aux" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Backend_Bound_Aux zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that UOPS must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. 
All of these subevents count backend stalls, in slots, due to a resource limitation. These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based. These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.""" class Resource_Bound: name = "Resource_Bound" domain = "Slots" area = "BE_aux" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Resource_Bound zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count.""" class Mem_Scheduler: name = "Mem_Scheduler" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.MEM_SCHEDULER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Mem_Scheduler zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.""" class ST_Buffer: name = "ST_Buffer" domain = "Count" area = "BE_aux" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Mem_Scheduler.compute(EV) * (EV("MEM_SCHEDULER_BLOCK.ST_BUF", 4) / EV("MEM_SCHEDULER_BLOCK.ALL", 4)) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "ST_Buffer zero 
division") return self.val desc = """ Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to store buffer full""" class LD_Buffer: name = "LD_Buffer" domain = "Count" area = "BE_aux" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Mem_Scheduler.compute(EV) * EV("MEM_SCHEDULER_BLOCK.LD_BUF", 4) / EV("MEM_SCHEDULER_BLOCK.ALL", 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "LD_Buffer zero division") return self.val desc = """ Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to load buffer full""" class RSV: name = "RSV" domain = "Count" area = "BE_aux" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = self.Mem_Scheduler.compute(EV) * EV("MEM_SCHEDULER_BLOCK.RSV", 4) / EV("MEM_SCHEDULER_BLOCK.ALL", 4) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "RSV zero division") return self.val desc = """ Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to RSV full relative""" class Non_Mem_Scheduler: name = "Non_Mem_Scheduler" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Non_Mem_Scheduler zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.""" class Register: name = 
"Register" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.REGISTER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Register zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).""" class Reorder_Buffer: name = "Reorder_Buffer" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.REORDER_BUFFER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Reorder_Buffer zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls).""" class Alloc_Restriction: name = "Alloc_Restriction" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Alloc_Restriction zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions.""" class Serialization: name = "Serialization" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.SERIALIZATION", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Serialization 
zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).""" class Retiring: name = "Retiring" domain = "Slots" area = "RET" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("TOPDOWN_RETIRING.ALL", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.75) except ZeroDivisionError: handle_error(self, "Retiring zero division") return self.val desc = """ Counts the number of issue slots that result in retirement slots.""" class Base: name = "Base" domain = "Slots" area = "RET" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("TOPDOWN_RETIRING.ALL", 2) - EV("UOPS_RETIRED.MS", 2)) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.60) except ZeroDivisionError: handle_error(self, "Base zero division") return self.val desc = """ Counts the number of uops that are not from the microsequencer.""" class FPDIV_uops: name = "FPDIV_uops" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_RETIRED.FPDIV", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "FPDIV_uops zero division") return self.val desc = """ Counts the number of floating point divide operations per uop.""" class Other_Ret: name = "Other_Ret" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("TOPDOWN_RETIRING.ALL", 3) - EV("UOPS_RETIRED.MS", 3) - EV("UOPS_RETIRED.FPDIV", 3)) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.30) except ZeroDivisionError: 
handle_error(self, "Other_Ret zero division") return self.val desc = """ Counts the number of uops retired excluding ms and fp div uops.""" class MS_uops: name = "MS_uops" domain = "Slots" area = "RET" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_RETIRED.MS", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "MS_uops zero division") return self.val desc = """ Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.""" class Metric_CLKS: name = "CLKS" domain = "Cycles" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS zero division") desc = """ """ class Metric_CLKS_P: name = "CLKS_P" domain = "Cycles" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CLKS_P(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS_P zero division") desc = """ """ class Metric_SLOTS: name = "SLOTS" domain = "Cycles" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = SLOTS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SLOTS zero division") desc = """ """ class Metric_IPC: name = "IPC" domain = "" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IPC zero division") desc = """ Instructions Per Cycle""" class Metric_CPI: name = "CPI" domain = "" maxval = 
0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPI zero division") desc = """ Cycles Per Instruction""" class Metric_UPI: name = "UPI" domain = "" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = UPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "UPI zero division") desc = """ Uops Per Instruction""" class Metric_Store_Fwd_Blocks: name = "Store_Fwd_Blocks" domain = "" maxval = 0 errcount = 0 area = "Info.L1_Bound" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Store_Fwd_Blocks(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Store_Fwd_Blocks zero division") desc = """ Percentage of total non-speculative loads with a store forward or unknown store address block""" class Metric_Address_Alias_Blocks: name = "Address_Alias_Blocks" domain = "" maxval = 0 errcount = 0 area = "Info.L1_Bound" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Address_Alias_Blocks(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Address_Alias_Blocks zero division") desc = """ Percentage of total non-speculative loads with a address aliasing block""" class Metric_Load_Splits: name = "Load_Splits" domain = "" maxval = 0 errcount = 0 area = "Info.L1_Bound" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Load_Splits(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Splits zero division") desc = """ Percentage of total non-speculative loads that are splits""" class Metric_IpBranch: name = "IpBranch" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, 
EV): try: self.val = IpBranch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpBranch zero division") desc = """ Instructions per Branch (lower number means higher occurrence rate)""" class Metric_IpCall: name = "IpCall" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpCall(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpCall zero division") desc = """ Instruction per (near) call (lower number means higher occurrence rate)""" class Metric_IpLoad: name = "IpLoad" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpLoad(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpLoad zero division") desc = """ Instructions per Load""" class Metric_IpStore: name = "IpStore" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpStore(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpStore zero division") desc = """ Instructions per Store""" class Metric_IpMispredict: name = "IpMispredict" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Instructions per retired Branch Misprediction""" class Metric_IpMisp_Cond_Ntaken: name = "IpMisp_Cond_Ntaken" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMisp_Cond_Ntaken(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMisp_Cond_Ntaken zero division") desc = """ 
Instructions per retired conditional Branch Misprediction where the branch was not taken""" class Metric_IpMisp_Cond_Taken: name = "IpMisp_Cond_Taken" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMisp_Cond_Taken(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMisp_Cond_Taken zero division") desc = """ Instructions per retired conditional Branch Misprediction where the branch was taken""" class Metric_IpMisp_Ret: name = "IpMisp_Ret" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMisp_Ret(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMisp_Ret zero division") desc = """ Instructions per retired return Branch Misprediction""" class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc = """ Instructions per retired indirect call or jump Branch Misprediction""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch""" class Metric_Branch_Mispredict_Ratio: name = "Branch_Mispredict_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Branch_Mispredict_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, 
"Branch_Mispredict_Ratio zero division") desc = """ Ratio of all branches which mispredict""" class Metric_Branch_Mispredict_to_Unknown_Branch_Ratio: name = "Branch_Mispredict_to_Unknown_Branch_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Branch_Mispredict_to_Unknown_Branch_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Mispredict_to_Unknown_Branch_Ratio zero division") desc = """ Ratio between Mispredicted branches and unknown branches""" class Metric_Microcode_Uop_Ratio: name = "Microcode_Uop_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Microcode_Uop_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Microcode_Uop_Ratio zero division") desc = """ Percentage of all uops which are ucode ops""" class Metric_FPDiv_Uop_Ratio: name = "FPDiv_Uop_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = FPDiv_Uop_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FPDiv_Uop_Ratio zero division") desc = """ Percentage of all uops which are FPDiv uops""" class Metric_IDiv_Uop_Ratio: name = "IDiv_Uop_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IDiv_Uop_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IDiv_Uop_Ratio zero division") desc = """ Percentage of all uops which are IDiv uops""" class Metric_X87_Uop_Ratio: name = "X87_Uop_Ratio" domain = "" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = X87_Uop_Ratio(self, EV, 0) self.thresh 
= True except ZeroDivisionError: handle_error_metric(self, "X87_Uop_Ratio zero division") desc = """ Percentage of all uops which are x87 uops""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_Kernel_Utilization: name = "Kernel_Utilization" domain = "" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in Kernel mode""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization""" class Metric_Cycles_per_Demand_Load_L2_Hit: name = "Cycles_per_Demand_Load_L2_Hit" domain = "" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Cycles_per_Demand_Load_L2_Hit(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cycles_per_Demand_Load_L2_Hit zero division") desc = """ Cycle cost per L2 hit""" class Metric_Cycles_per_Demand_Load_L3_Hit: name = "Cycles_per_Demand_Load_L3_Hit" domain = "" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Cycles_per_Demand_Load_L3_Hit(self, EV, 0) 
self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cycles_per_Demand_Load_L3_Hit zero division") desc = """ Cycle cost per LLC hit""" class Metric_Inst_Miss_Cost_L2Hit_Percent: name = "Inst_Miss_Cost_L2Hit_Percent" domain = "" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Inst_Miss_Cost_L2Hit_Percent(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Inst_Miss_Cost_L2Hit_Percent zero division") desc = """ Percent of instruction miss cost that hit in the L2""" class Metric_Inst_Miss_Cost_L3Hit_Percent: name = "Inst_Miss_Cost_L3Hit_Percent" domain = "" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Inst_Miss_Cost_L3Hit_Percent(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Inst_Miss_Cost_L3Hit_Percent zero division") desc = """ Percent of instruction miss cost that hit in the L3""" class Metric_Inst_Miss_Cost_DRAMHit_Percent: name = "Inst_Miss_Cost_DRAMHit_Percent" domain = "" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Inst_Miss_Cost_DRAMHit_Percent(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Inst_Miss_Cost_DRAMHit_Percent zero division") desc = """ Percent of instruction miss cost that hit in DRAM""" class Metric_MemLoadPKI: name = "MemLoadPKI" domain = "" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = MemLoadPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MemLoadPKI zero division") desc = """ load ops retired per 1000 instruction""" # Schedule class Setup: def __init__(self, r): o = dict() n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Fetch_Latency() ; 
r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = Branch_Detect() ; r.run(n) ; o["Branch_Detect"] = n n = Branch_Resteer() ; r.run(n) ; o["Branch_Resteer"] = n n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n n = Cisc() ; r.run(n) ; o["Cisc"] = n n = Decode() ; r.run(n) ; o["Decode"] = n n = Predecode() ; r.run(n) ; o["Predecode"] = n n = Other_FB() ; r.run(n) ; o["Other_FB"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Nuke() ; r.run(n) ; o["Nuke"] = n n = SMC() ; r.run(n) ; o["SMC"] = n n = Memory_Ordering() ; r.run(n) ; o["Memory_Ordering"] = n n = FP_Assist() ; r.run(n) ; o["FP_Assist"] = n n = Disambiguation() ; r.run(n) ; o["Disambiguation"] = n n = Page_Fault() ; r.run(n) ; o["Page_Fault"] = n n = Fast_Nuke() ; r.run(n) ; o["Fast_Nuke"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = STLB_Hit() ; r.run(n) ; o["STLB_Hit"] = n n = STLB_Miss() ; r.run(n) ; o["STLB_Miss"] = n n = Other_L1() ; r.run(n) ; o["Other_L1"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = Other_Load_Store() ; r.run(n) ; o["Other_Load_Store"] = n if use_aux: n = Backend_Bound_Aux() ; r.run(n) ; o["Backend_Bound_Aux"] = n if use_aux: n = Resource_Bound() ; r.run(n) ; o["Resource_Bound"] = n n = Mem_Scheduler() ; r.run(n) ; o["Mem_Scheduler"] = n if use_aux: n = ST_Buffer() ; r.run(n) ; o["ST_Buffer"] = n if use_aux: n = LD_Buffer() ; r.run(n) ; o["LD_Buffer"] = n if 
use_aux: n = RSV() ; r.run(n) ; o["RSV"] = n if use_aux: n = Non_Mem_Scheduler() ; r.run(n) ; o["Non_Mem_Scheduler"] = n if use_aux: n = Register() ; r.run(n) ; o["Register"] = n if use_aux: n = Reorder_Buffer() ; r.run(n) ; o["Reorder_Buffer"] = n if use_aux: n = Alloc_Restriction() ; r.run(n) ; o["Alloc_Restriction"] = n if use_aux: n = Serialization() ; r.run(n) ; o["Serialization"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = Base() ; r.run(n) ; o["Base"] = n n = FPDIV_uops() ; r.run(n) ; o["FPDIV_uops"] = n n = Other_Ret() ; r.run(n) ; o["Other_Ret"] = n n = MS_uops() ; r.run(n) ; o["MS_uops"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Detect"].parent = o["Fetch_Latency"] o["Branch_Resteer"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["Cisc"].parent = o["Fetch_Bandwidth"] o["Decode"].parent = o["Fetch_Bandwidth"] o["Predecode"].parent = o["Fetch_Bandwidth"] o["Other_FB"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Nuke"].parent = o["Machine_Clears"] o["SMC"].parent = o["Nuke"] o["Memory_Ordering"].parent = o["Nuke"] o["FP_Assist"].parent = o["Nuke"] o["Disambiguation"].parent = o["Nuke"] o["Page_Fault"].parent = o["Nuke"] o["Fast_Nuke"].parent = o["Machine_Clears"] o["Core_Bound"].parent = o["Backend_Bound"] o["Memory_Bound"].parent = o["Backend_Bound"] o["Store_Bound"].parent = o["Memory_Bound"] o["L1_Bound"].parent = o["Memory_Bound"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["STLB_Hit"].parent = o["L1_Bound"] o["STLB_Miss"].parent = o["L1_Bound"] o["Other_L1"].parent = o["L1_Bound"] o["L2_Bound"].parent = o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["DRAM_Bound"].parent = o["Memory_Bound"] o["Other_Load_Store"].parent = o["Memory_Bound"] if use_aux: o["Resource_Bound"].parent = 
o["Backend_Bound_Aux"] if use_aux: o["Mem_Scheduler"].parent = o["Resource_Bound"] if use_aux: o["ST_Buffer"].parent = o["Mem_Scheduler"] if use_aux: o["LD_Buffer"].parent = o["Mem_Scheduler"] if use_aux: o["RSV"].parent = o["Mem_Scheduler"] if use_aux: o["Non_Mem_Scheduler"].parent = o["Resource_Bound"] if use_aux: o["Register"].parent = o["Resource_Bound"] if use_aux: o["Reorder_Buffer"].parent = o["Resource_Bound"] if use_aux: o["Alloc_Restriction"].parent = o["Resource_Bound"] if use_aux: o["Serialization"].parent = o["Resource_Bound"] o["Base"].parent = o["Retiring"] o["FPDIV_uops"].parent = o["Base"] o["Other_Ret"].parent = o["Base"] o["MS_uops"].parent = o["Retiring"] # user visible metrics n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_CLKS_P() ; r.metric(n) ; o["CLKS_P"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_UPI() ; r.metric(n) ; o["UPI"] = n n = Metric_Store_Fwd_Blocks() ; r.metric(n) ; o["Store_Fwd_Blocks"] = n n = Metric_Address_Alias_Blocks() ; r.metric(n) ; o["Address_Alias_Blocks"] = n n = Metric_Load_Splits() ; r.metric(n) ; o["Load_Splits"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Cond_Ntaken() ; r.metric(n) ; o["IpMisp_Cond_Ntaken"] = n n = Metric_IpMisp_Cond_Taken() ; r.metric(n) ; o["IpMisp_Cond_Taken"] = n n = Metric_IpMisp_Ret() ; r.metric(n) ; o["IpMisp_Ret"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n n = Metric_Branch_Mispredict_Ratio() ; r.metric(n) ; o["Branch_Mispredict_Ratio"] = n n = Metric_Branch_Mispredict_to_Unknown_Branch_Ratio() ; r.metric(n) ; 
o["Branch_Mispredict_to_Unknown_Branch_Ratio"] = n n = Metric_Microcode_Uop_Ratio() ; r.metric(n) ; o["Microcode_Uop_Ratio"] = n n = Metric_FPDiv_Uop_Ratio() ; r.metric(n) ; o["FPDiv_Uop_Ratio"] = n n = Metric_IDiv_Uop_Ratio() ; r.metric(n) ; o["IDiv_Uop_Ratio"] = n n = Metric_X87_Uop_Ratio() ; r.metric(n) ; o["X87_Uop_Ratio"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_Cycles_per_Demand_Load_L2_Hit() ; r.metric(n) ; o["Cycles_per_Demand_Load_L2_Hit"] = n n = Metric_Cycles_per_Demand_Load_L3_Hit() ; r.metric(n) ; o["Cycles_per_Demand_Load_L3_Hit"] = n n = Metric_Inst_Miss_Cost_L2Hit_Percent() ; r.metric(n) ; o["Inst_Miss_Cost_L2Hit_Percent"] = n n = Metric_Inst_Miss_Cost_L3Hit_Percent() ; r.metric(n) ; o["Inst_Miss_Cost_L3Hit_Percent"] = n n = Metric_Inst_Miss_Cost_DRAMHit_Percent() ; r.metric(n) ; o["Inst_Miss_Cost_DRAMHit_Percent"] = n n = Metric_MemLoadPKI() ; r.metric(n) ; o["MemLoadPKI"] = n # references between groups o["SMC"].Nuke = o["Nuke"] o["Memory_Ordering"].Nuke = o["Nuke"] o["FP_Assist"].Nuke = o["Nuke"] o["Disambiguation"].Nuke = o["Nuke"] o["Page_Fault"].Nuke = o["Nuke"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Mem_Scheduler = o["Mem_Scheduler"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Core_Bound"].Store_Bound = o["Store_Bound"] o["Memory_Bound"].Mem_Scheduler = o["Mem_Scheduler"] o["Memory_Bound"].Store_Bound = o["Store_Bound"] o["Store_Bound"].Mem_Scheduler = o["Mem_Scheduler"] o["Other_Load_Store"].Mem_Scheduler = o["Mem_Scheduler"] o["Other_Load_Store"].L1_Bound = o["L1_Bound"] o["Other_Load_Store"].L2_Bound = o["L2_Bound"] o["Other_Load_Store"].Memory_Bound = o["Memory_Bound"] o["Other_Load_Store"].Store_Bound = o["Store_Bound"] o["Other_Load_Store"].L3_Bound = o["L3_Bound"] o["Other_Load_Store"].DRAM_Bound = 
o["DRAM_Bound"] if use_aux: o["Backend_Bound_Aux"].Backend_Bound = o["Backend_Bound"] if use_aux: o["Resource_Bound"].Backend_Bound = o["Backend_Bound"] if use_aux: o["ST_Buffer"].Mem_Scheduler = o["Mem_Scheduler"] if use_aux: o["LD_Buffer"].Mem_Scheduler = o["Mem_Scheduler"] if use_aux: o["RSV"].Mem_Scheduler = o["Mem_Scheduler"] # siblings cross-tree
67,922
Python
.py
1,984
27.995968
215
0.618673
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,907
adl_glc_ratios.py
andikleen_pmu-tools/adl_glc_ratios.py
# -*- coding: latin-1 -*- # # auto generated TopDown/TMA 4.8-full-perf description for Intel 12th gen Core (code name Alderlake) with Golden Cove # Please see http://ark.intel.com for more details on these CPUs. # # References: # http://bit.ly/tma-ispass14 # http://halobates.de/blog/p/262 # https://sites.google.com/site/analysismethods/yasin-pubs # https://download.01.org/perfmon/ # https://github.com/andikleen/pmu-tools/wiki/toplev-manual # # Helpers print_error = lambda msg: False smt_enabled = False ebs_mode = False version = "4.8-full-perf" base_frequency = -1.0 Memory = 0 Average_Frequency = 0.0 num_cores = 1 num_threads = 1 num_sockets = 1 topdown_use_fixed = False def handle_error(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 obj.thresh = False def handle_error_metric(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 # Constants Exe_Ports = 12 Mem_L2_Store_Cost = 10 Mem_STLB_Hit_Cost = 7 MS_Switches_Cost = 3 Avg_Assist_Cost = ( 99 *3 + 63 + 30 ) / 5 Pipeline_Width = 6 OneMillion = 1000000 OneBillion = 1000000000 Energy_Unit = 61 Errata_Whitelist = "ADL038;ADL066" PERF_METRICS_MSR = 1 DS = 0 # Aux. 
formulas def Br_DoI_Jumps(self, EV, level): return EV("BR_INST_RETIRED.NEAR_TAKEN", level) - EV("BR_INST_RETIRED.COND_TAKEN", level) - 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) def Branching_Retired(self, EV, level): return (EV("BR_INST_RETIRED.ALL_BRANCHES", level) + 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("INST_RETIRED.NOP", level)) / SLOTS(self, EV, level) def Serialize_Core(self, EV, level): return self.Core_Bound.compute(EV) * (self.Serializing_Operation.compute(EV) + EV("RS.EMPTY:u1", level) / CLKS(self, EV, level) * self.Ports_Utilized_0.compute(EV)) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV)) def Umisp(self, EV, level): return 10 * self.Microcode_Sequencer.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV) def Assist(self, EV, level): return (self.Microcode_Sequencer.compute(EV) / (self.Microcode_Sequencer.compute(EV) + self.Few_Uops_Instructions.compute(EV))) * (self.Assists.compute(EV) / self.Microcode_Sequencer.compute(EV)) def Assist_Frontend(self, EV, level): return (1 - EV("INST_RETIRED.REP_ITERATION", level) / EV("UOPS_RETIRED.MS:c1", level)) * (self.Fetch_Latency.compute(EV) * (self.MS_Switches.compute(EV) + self.Branch_Resteers.compute(EV) * (self.Clears_Resteers.compute(EV) + self.Mispredicts_Resteers.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV)) / (self.Clears_Resteers.compute(EV) + self.Unknown_Branches.compute(EV) + self.Mispredicts_Resteers.compute(EV))) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))) def Assist_Retired(self, EV, level): return Assist(self, EV, level) * self.Heavy_Operations.compute(EV) def Core_Bound_Cycles(self, EV, level): return self.Ports_Utilized_0.compute(EV) * CLKS(self, EV, level) + Few_Uops_Executed_Threshold(self, EV, level) def 
DurationTimeInSeconds(self, EV, level): return EV("interval-ms", 0) / 1000 def Execute_Cycles(self, EV, level): return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.THREAD:c1", level) # factor used for metrics associating fixed costs for FB Hits - according to probability theory if all FB Hits come at a random rate in original L1_Miss cost interval then the average cost for each one is 0.5 of the fixed cost def FB_Factor(self, EV, level): return 1 + FBHit_per_L1Miss(self, EV, level) / 2 def FBHit_per_L1Miss(self, EV, level): return EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("MEM_LOAD_RETIRED.L1_MISS", level) def Fetched_Uops(self, EV, level): return EV("UOPS_ISSUED.ANY", level) def Few_Uops_Executed_Threshold(self, EV, level): return EV("EXE_ACTIVITY.1_PORTS_UTIL", level) + self.Retiring.compute(EV) * EV("EXE_ACTIVITY.2_PORTS_UTIL:u0xc", level) # Floating Point computational (arithmetic) Operations Count def FLOP_Count(self, EV, level): return EV("FP_ARITH_INST_RETIRED.SCALAR", level) + 2 * EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + 4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) + 8 * EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level) # Floating Point computational (arithmetic) Operations Count def FP_Arith_Scalar(self, EV, level): return EV("FP_ARITH_INST_RETIRED.SCALAR", level) # Floating Point computational (arithmetic) Operations Count def FP_Arith_Vector(self, EV, level): return EV("FP_ARITH_INST_RETIRED.VECTOR", level) def HighIPC(self, EV, level): val = IPC(self, EV, level) / Pipeline_Width return val def Light_Ops_Sum(self, EV, level): return self.FP_Arith.compute(EV) + self.Int_Operations.compute(EV) + self.Memory_Operations.compute(EV) + self.Fused_Instructions.compute(EV) + self.Non_Fused_Branches.compute(EV) def LOAD_L3_HIT(self, EV, level): return EV("MEM_LOAD_RETIRED.L3_HIT", level) * FB_Factor(self, EV, level) def LOAD_XSNP_HIT(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD", 
level) + EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", level) * (1 - True_XSNP_HitM_Fraction(self, EV, level)) def LOAD_XSNP_HITM(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", level) * True_XSNP_HitM_Fraction(self, EV, level) def LOAD_XSNP_MISS(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", level) def MEM_Bound_Ratio(self, EV, level): return EV("MEMORY_ACTIVITY.STALLS_L3_MISS", level) / CLKS(self, EV, level) def Mem_Lock_St_Fraction(self, EV, level): return EV("MEM_INST_RETIRED.LOCK_LOADS", level) / EV("MEM_INST_RETIRED.ALL_STORES", level) def Mispred_Clears_Fraction(self, EV, level): return self.Branch_Mispredicts.compute(EV) / self.Bad_Speculation.compute(EV) def ORO_Demand_RFO_C1(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level ) def ORO_DRD_Any_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level ) def ORO_DRD_BW_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c4", level)) , level ) def Store_L2_Hit_Cycles(self, EV, level): return EV("MEM_STORE_RETIRED.L2_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level)) def True_XSNP_HitM_Fraction(self, EV, level): return EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", level) / (EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", level) + EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", level)) def Mem_XSNP_HitM_Cost(self, EV, level): return 28 * Core_Frequency(self, EV, level) def Mem_XSNP_Hit_Cost(self, EV, level): return 27 * Core_Frequency(self, EV, level) def Mem_XSNP_None_Cost(self, EV, level): return 12 * Core_Frequency(self, EV, level) def Mem_L2_Hit_Cost(self, EV, level): return 3 * Core_Frequency(self, EV, level) def PERF_METRICS_SUM(self, EV, level): 
return (EV("PERF_METRICS.FRONTEND_BOUND", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.BAD_SPECULATION", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.RETIRING", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.BACKEND_BOUND", level) / EV("TOPDOWN.SLOTS", level)) def Retire_Fraction(self, EV, level): return EV("UOPS_RETIRED.SLOTS", level) / EV("UOPS_ISSUED.ANY", level) # Retired slots per Logical Processor def Retired_Slots(self, EV, level): return self.Retiring.compute(EV) * SLOTS(self, EV, level) # Number of logical processors (enabled or online) on the target system def Num_CPUs(self, EV, level): return num_cores * num_threads if num_cores else(8 + 16 /(2 - smt_enabled)) # A system parameter for dependent-loads (pointer chasing like access pattern) of the workload. An integer fraction in range from 0 (no dependent loads) to 100 (all loads are dependent loads) def Dependent_Loads_Weight(self, EV, level): return 20 # Total pipeline cost of Branch Misprediction related bottlenecks def Mispredictions(self, EV, level): val = 100 *(1 - Umisp(self, EV, level)) * (self.Branch_Mispredicts.compute(EV) + self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))) self.thresh = (val > 20) return val # Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses) def Big_Code(self, EV, level): val = 100 * self.Fetch_Latency.compute(EV) * (self.ITLB_Misses.compute(EV) + self.ICache_Misses.compute(EV) + self.Unknown_Branches.compute(EV)) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) self.thresh = (val > 20) return val # Total pipeline cost of 
# Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)
def Instruction_Fetch_BW(self, EV, level):
    val = 100 *(self.Frontend_Bound.compute(EV) - (1 - Umisp(self, EV, level)) * self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) - Assist_Frontend(self, EV, level)) - Big_Code(self, EV, level)
    self.thresh = (val > 20)
    return val

# Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks
def Cache_Memory_Bandwidth(self, EV, level):
    val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Bandwidth.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.SQ_Full.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.FB_Full.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of external Memory- or Cache-Latency related bottlenecks
def Cache_Memory_Latency(self, EV, level):
    val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Latency.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.L3_Hit_Latency.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * self.L2_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Store_Latency.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.L1_Hit_Latency.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)
def Memory_Data_TLBs(self, EV, level):
    val = 100 *(self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / max(self.Memory_Bound.compute(EV) , (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV)))) * (self.DTLB_Load.compute(EV) / max(self.L1_Bound.compute(EV) , (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.DTLB_Store.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)
def Memory_Synchronization(self, EV, level):
    # NOTE(review): the trailing Machine_Clears term multiplies by
    # (1 - Other_Nukes/Other_Nukes); this is emitted by the metric generator
    # as-is and is kept verbatim for fidelity with the upstream TMA spreadsheet.
    val = 100 *(self.Memory_Bound.compute(EV) * ((self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Contested_Accesses.compute(EV) + self.Data_Sharing.compute(EV)) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)) + (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * self.False_Sharing.compute(EV) / ((self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)) - self.Store_Latency.compute(EV))) + self.Machine_Clears.compute(EV) * (1 - self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))))
    self.thresh = (val > 10)
    return val

# Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy.
def Compute_Bound_Est(self, EV, level):
    val = 100 *((self.Core_Bound.compute(EV) * self.Divider.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) + (self.Core_Bound.compute(EV) * (self.Ports_Utilization.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) * (self.Ports_Utilized_3m.compute(EV) / (self.Ports_Utilized_0.compute(EV) + self.Ports_Utilized_1.compute(EV) + self.Ports_Utilized_2.compute(EV) + self.Ports_Utilized_3m.compute(EV)))))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments)
def Irregular_Overhead(self, EV, level):
    # NOTE(review): Other_Nukes/Other_Nukes reduces to 1 — generator artifact kept verbatim.
    val = 100 *(Assist_Frontend(self, EV, level) + Umisp(self, EV, level) * self.Branch_Mispredicts.compute(EV) + (self.Machine_Clears.compute(EV) * self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))) + Serialize_Core(self, EV, level) + Assist_Retired(self, EV, level))
    self.thresh = (val > 10)
    return val

# Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.
def Other_Bottlenecks(self, EV, level):
    val = 100 -(Big_Code(self, EV, level) + Instruction_Fetch_BW(self, EV, level) + Mispredictions(self, EV, level) + Cache_Memory_Bandwidth(self, EV, level) + Cache_Memory_Latency(self, EV, level) + Memory_Data_TLBs(self, EV, level) + Memory_Synchronization(self, EV, level) + Compute_Bound_Est(self, EV, level) + Irregular_Overhead(self, EV, level) + Branching_Overhead(self, EV, level) + Useful_Work(self, EV, level))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound). Consider Loop Unrolling or function inlining optimizations
def Branching_Overhead(self, EV, level):
    val = 100 * Branching_Retired(self, EV, level)
    self.thresh = (val > 5)
    return val

# Total pipeline cost of "useful operations" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.
def Useful_Work(self, EV, level):
    val = 100 *(self.Retiring.compute(EV) - Branching_Retired(self, EV, level) - Assist_Retired(self, EV, level))
    self.thresh = (val > 20)
    return val

# Probability of Core Bound bottleneck hidden by SMT-profiling artifacts. Tip: consider analysis with SMT disabled
def Core_Bound_Likely(self, EV, level):
    val = 100 *(1 - self.Core_Bound.compute(EV) / self.Ports_Utilization.compute(EV) if self.Core_Bound.compute(EV)< self.Ports_Utilization.compute(EV) else 1) if SMT_2T_Utilization(self, EV, level)> 0.5 else 0
    self.thresh = (val > 0.5)
    return val

# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)

# Uops Per Instruction
def UopPI(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
    self.thresh = (val > 1.05)
    return val

# Uops per taken branch
def UpTB(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 1.5
    return val

# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
    return 1 / IPC(self, EV, level)

# Per-Logical Processor actual clocks when the Logical Processor is active.
def CLKS(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD", level)

# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
    # NOTE(review): both branches read the same event — generator artifact,
    # kept so the event-collection pass sees identical EV() calls.
    return EV("TOPDOWN.SLOTS", level) if topdown_use_fixed else EV("TOPDOWN.SLOTS", level)

# Fraction of Physical Core issue-slots utilized by this Logical Processor
def Slots_Utilization(self, EV, level):
    return SLOTS(self, EV, level) / (EV("TOPDOWN.SLOTS:percore", level) / 2) if smt_enabled else 1

# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_ISSUED.ANY", level)

# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)

# Floating Point Operations Per Cycle
def FLOPc(self, EV, level):
    return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level)

# Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to Fused-Multiply Add use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common.
def FP_Arith_Utilization(self, EV, level):
    return (EV("FP_ARITH_DISPATCHED.PORT_0", level) + EV("FP_ARITH_DISPATCHED.PORT_1", level) + EV("FP_ARITH_DISPATCHED.PORT_5", level)) / (2 * CORE_CLKS(self, EV, level))

# Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)
def ILP(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_EXECUTED.THREAD:c1", level)

# uops Executed per Cycle
def EPC(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / CLKS(self, EV, level)

# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
    return EV("CPU_CLK_UNHALTED.DISTRIBUTED", level) if smt_enabled else CLKS(self, EV, level)

# Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses.
#Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_LOADS", level)
    self.thresh = (val < 3)
    return val

# Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses.
#Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)
    self.thresh = (val < 8)
    return val

# Instructions per Branch (lower number means higher occurrence rate)
def IpBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 8)
    return val

# Instructions per (near) call (lower number means higher occurrence rate)
def IpCall(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
    self.thresh = (val < 200)
    return val

# Instructions per taken branch
def IpTB(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 2 + 1
    return val

# Branch instructions per taken branch. Can be used to approximate PGO-likelihood for non-loopy codes.
def BpTkBranch(self, EV, level):
    return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)

# Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408
def IpFLOP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / FLOP_Count(self, EV, level)
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.
def IpArith(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level))
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_SP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level)
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_DP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level)
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX128(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX256(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val

# Instructions per PAUSE (lower number means higher occurrence rate)
def IpPause(self, EV, level):
    return Instructions(self, EV, level) / EV("CPU_CLK_UNHALTED.PAUSE_INST", level)

# Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)
def IpSWPF(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("SW_PREFETCH_ACCESS.T0:u0xF", level)
    self.thresh = (val < 100)
    return val

# Total number of retired Instructions
def Instructions(self, EV, level):
    return EV("INST_RETIRED.ANY", level)

# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
    return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.SLOTS:c1", level)

# Estimated fraction of retirement-cycles dealing with repeat instructions
def Strings_Cycles(self, EV, level):
    val = EV("INST_RETIRED.REP_ITERATION", level) / EV("UOPS_RETIRED.SLOTS:c1", level)
    self.thresh = (val > 0.1)
    return val

# Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)
def IpAssist(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("ASSISTS.ANY", level)
    self.thresh = (val < 100000)
    return val

def Execute(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level)

# Average number of uops fetched from LSD per cycle
def Fetch_LSD(self, EV, level):
    return EV("LSD.UOPS", level) / EV("LSD.CYCLES_ACTIVE", level)

# Average number of uops fetched from DSB per cycle
def Fetch_DSB(self, EV, level):
    return EV("IDQ.DSB_UOPS", level) / EV("IDQ.DSB_CYCLES_ANY", level)

# Average number of uops fetched from MITE per cycle
def Fetch_MITE(self, EV, level):
    return EV("IDQ.MITE_UOPS", level) / EV("IDQ.MITE_CYCLES_ANY", level)

# Average number of Uops issued by front-end when it issued something
def Fetch_UpC(self, EV, level):
    return EV("UOPS_ISSUED.ANY", level) / EV("UOPS_ISSUED.ANY:c1", level)

# Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)
def LSD_Coverage(self, EV, level):
    return EV("LSD.UOPS", level) / Fetched_Uops(self, EV, level)

# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val

# Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.
def Unknown_Branch_Cost(self, EV, level):
    return EV("INT_MISC.UNKNOWN_BRANCH_CYCLES", level) / EV("INT_MISC.UNKNOWN_BRANCH_CYCLES:c1:e1", level)

# Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.
def DSB_Switch_Cost(self, EV, level):
    return EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", level) / EV("DSB2MITE_SWITCHES.PENALTY_CYCLES:c1:e1", level)

# Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Misses(self, EV, level):
    val = 100 *(self.Fetch_Latency.compute(EV) * self.DSB_Switches.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) + self.Fetch_Bandwidth.compute(EV) * self.MITE.compute(EV) / (self.LSD.compute(EV) + self.MITE.compute(EV) + self.DSB.compute(EV)))
    self.thresh = (val > 10)
    return val

# Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Bandwidth(self, EV, level):
    val = 100 *(self.Frontend_Bound.compute(EV) * (self.Fetch_Bandwidth.compute(EV) / (self.Fetch_Bandwidth.compute(EV) + self.Fetch_Latency.compute(EV))) * (self.DSB.compute(EV) / (self.LSD.compute(EV) + self.MITE.compute(EV) + self.DSB.compute(EV))))
    self.thresh = (val > 10)
    return val

# Average Latency for L1 instruction cache misses
def ICache_Miss_Latency(self, EV, level):
    return EV("ICACHE_DATA.STALLS", level) / EV("ICACHE_DATA.STALLS:c1:e1", level)

# Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.
def IC_Misses(self, EV, level):
    val = 100 *(self.Fetch_Latency.compute(EV) * self.ICache_Misses.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
    self.thresh = (val > 5)
    return val

# Instructions per non-speculative DSB miss (lower number means higher occurrence rate)
def IpDSB_Miss_Ret(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FRONTEND_RETIRED.ANY_DSB_MISS", level)
    self.thresh = (val < 50)
    return val

# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
    return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)

# L2 cache true code cacheline misses per kilo instruction
def L2MPKI_Code(self, EV, level):
    return 1000 * EV("FRONTEND_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache speculative code cacheline misses per kilo instruction
def L2MPKI_Code_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.CODE_RD_MISS", level) / EV("INST_RETIRED.ANY", level)

# Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 200)
    return val

# Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).
def IpMisp_Cond_Ntaken(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_NTAKEN", level)
    self.thresh = (val < 200)
    return val

# Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).
def IpMisp_Cond_Taken(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_TAKEN", level)
    self.thresh = (val < 200)
    return val

# Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).
def IpMisp_Ret(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.RET", level)
    self.thresh = (val < 500)
    return val

# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.INDIRECT", level)
    self.thresh = (val < 1000)
    return val

# Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)
def Branch_Misprediction_Cost(self, EV, level):
    return Mispredictions(self, EV, level) * SLOTS(self, EV, level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / 100

# Speculative to Retired ratio of all clears (covering Mispredicts and nukes)
def Spec_Clears_Ratio(self, EV, level):
    return EV("INT_MISC.CLEARS_COUNT", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))

# Fraction of branches that are non-taken conditionals
def Cond_NT(self, EV, level):
    return EV("BR_INST_RETIRED.COND_NTAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are taken conditionals
def Cond_TK(self, EV, level):
    return EV("BR_INST_RETIRED.COND_TAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are CALL or RET
def CallRet(self, EV, level):
    return (EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("BR_INST_RETIRED.NEAR_RETURN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are unconditional (direct or indirect) jumps
def Jump(self, EV, level):
    return Br_DoI_Jumps(self, EV, level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)
def Other_Branches(self, EV, level):
    return 1 -(Cond_NT(self, EV, level) + Cond_TK(self, EV, level) + CallRet(self, EV, level) + Jump(self, EV, level))

# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("MEM_LOAD_COMPLETED.L1_MISS_ANY", level)

# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)

# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)

# L1 cache true misses per kilo instruction for all demand loads (including speculative)
def L1MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.ALL_DEMAND_DATA_RD", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for all request types (including speculative)
def L2MPKI_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for all demand loads (including speculative)
def L2MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level) / EV("INST_RETIRED.ANY", level)

# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("L2_RQSTS.RFO_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache hits per kilo instruction for all request types (including speculative)
def L2HPKI_All(self, EV, level):
    return 1000 *(EV("L2_RQSTS.REFERENCES", level) - EV("L2_RQSTS.MISS", level)) / EV("INST_RETIRED.ANY", level)

# L2 cache hits per kilo instruction for all demand loads (including speculative)
def L2HPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level) / EV("INST_RETIRED.ANY", level)

# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)

# Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)
def FB_HPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("INST_RETIRED.ANY", level)

def L1D_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)

def L2_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)

def L3_Cache_Fill_BW(self, EV, level):
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)

def L3_Cache_Access_BW(self, EV, level):
    return 64 * EV("OFFCORE_REQUESTS.ALL_REQUESTS", level) / OneBillion / Time(self, EV, level)

# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    val = (EV("ITLB_MISSES.WALK_PENDING", level) + EV("DTLB_LOAD_MISSES.WALK_PENDING", level) + EV("DTLB_STORE_MISSES.WALK_PENDING", level)) / (4 * CORE_CLKS(self, EV, level))
    self.thresh = (val > 0.5)
    return val

# STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Code_STLB_MPKI(self, EV, level):
    return 1000 * EV("ITLB_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Load_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Store_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    return L1D_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    return L2_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    return L3_Cache_Fill_BW(self, EV, level)

# Average per-core data access bandwidth to the L3 cache [GB / sec]
def L3_Cache_Access_BW_2T(self, EV, level):
    return L3_Cache_Access_BW(self, EV, level)

# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)

# Average Latency for L3 cache miss demand Loads
def Load_L3_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD:c1", level)

# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)

# Un-cacheable retired load per kilo instruction
def UC_Load_PKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_MISC_RETIRED.UC", level) / EV("INST_RETIRED.ANY", level)

# "Bus lock" per kilo instruction
def Bus_Lock_PKI(self, EV, level):
    return 1000 * EV("SQ_MISC.BUS_LOCK", level) / EV("INST_RETIRED.ANY", level)

# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)

# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)

# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)

# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)

# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)

# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_DISTRIBUTED", level) if smt_enabled else 0

# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val

# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)

# Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states. Sample code of TPAUSE: https://github.com/torvalds/linux/blob/master/arch/x86/lib/delay.c#L105. If running on Linux, please check the power control interface: https://github.com/torvalds/linux/blob/master/arch/x86/kernel/cpu/umwait.c and https://github.com/torvalds/linux/blob/master/Documentation/ABI/testing/sysfs-devices-system-cpu#L587
def C0_Wait(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.C0_WAIT", level) / CLKS(self, EV, level)
    self.thresh = (val > 0.05)
    return val

# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    return 64 *(EV("UNC_ARB_TRK_REQUESTS.ALL", level) + EV("UNC_ARB_COH_TRK_REQUESTS.ALL", level)) / OneMillion / Time(self, EV, level) / 1000

# Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. memory-controller only
def MEM_Read_Latency(self, EV, level):
    return (EV("UNC_ARB_TRK_OCCUPANCY.RD", level) + EV("UNC_ARB_DAT_OCCUPANCY.RD", level)) / EV("UNC_ARB_TRK_REQUESTS.RD", level)

# Average number of parallel data read requests to external memory.
Accounts for demand loads and L1/L2 prefetches def MEM_Parallel_Reads(self, EV, level): return EV("UNC_ARB_DAT_OCCUPANCY.RD", level) / EV("UNC_ARB_DAT_OCCUPANCY.RD:c1", level) # Total package Power in Watts def Power(self, EV, level): return EV("UNC_PKG_ENERGY_STATUS", level) * Energy_Unit /(Time(self, EV, level) * OneMillion ) # Run duration time in seconds def Time(self, EV, level): val = EV("interval-s", 0) self.thresh = (val < 1) return val # Socket actual clocks when any core is active on that socket def Socket_CLKS(self, EV, level): return EV("UNC_CLOCK.SOCKET", level) # Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate] def IpFarBranch(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level) self.thresh = (val < 1000000) return val # Event groups class Frontend_Bound: name = "Frontend_Bound" domain = "Slots" area = "FE" level = 1 htoff = False sample = ['FRONTEND_RETIRED.LATENCY_GE_4:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO']) maxval = None def compute(self, EV): try: self.val = (EV("PERF_METRICS.FRONTEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) - EV("INT_MISC.UOP_DROPPING", 1) / SLOTS(self, EV, 1) if topdown_use_fixed else(EV("IDQ_BUBBLES.CORE", 1) - EV("INT_MISC.UOP_DROPPING", 1)) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Frontend_Bound zero division") return self.val desc = """ This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. 
Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.""" class Fetch_Latency: name = "Fetch_Latency" domain = "Slots" area = "FE" level = 2 htoff = False sample = ['FRONTEND_RETIRED.LATENCY_GE_16:pp', 'FRONTEND_RETIRED.LATENCY_GE_8:pp'] errcount = 0 sibling = None metricgroup = frozenset(['Frontend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = ((EV("PERF_METRICS.FETCH_LATENCY", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) - EV("INT_MISC.UOP_DROPPING", 2) / SLOTS(self, EV, 2)) if topdown_use_fixed else(EV("IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE", 2) * Pipeline_Width - EV("INT_MISC.UOP_DROPPING", 2)) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Fetch_Latency zero division") return self.val desc = """ This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction- cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. 
In such cases; the Frontend eventually delivers no uops for some period.""" class ICache_Misses: name = "ICache_Misses" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.L2_MISS:pp', 'FRONTEND_RETIRED.L1I_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss']) maxval = None def compute(self, EV): try: self.val = EV("ICACHE_DATA.STALLS", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "ICache_Misses zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.. Using compiler's Profile-Guided Optimization (PGO) can reduce i-cache misses through improved hot code layout.""" class ITLB_Misses: name = "ITLB_Misses" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.STLB_MISS:pp', 'FRONTEND_RETIRED.ITLB_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB']) maxval = None def compute(self, EV): try: self.val = EV("ICACHE_TAG.STALLS", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "ITLB_Misses zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.. Consider large 2M pages for code (selectively prefer hot large-size function, due to limited 2M entries). Linux options: standard binaries use libhugetlbfs; Hfsort.. https://github. 
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public ations/optimizing-function-placement-for-large-scale-data- center-applications-2/""" class Branch_Resteers: name = "Branch_Resteers" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['BR_MISP_RETIRED.ALL_BRANCHES'] errcount = 0 sibling = None metricgroup = frozenset(['FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("INT_MISC.CLEAR_RESTEER_CYCLES", 3) / CLKS(self, EV, 3) + self.Unknown_Branches.compute(EV) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Branch_Resteers zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings.""" class Mispredicts_Resteers: name = "Mispredicts_Resteers" domain = "Clocks" area = "FE" level = 4 htoff = False sample = ['INT_MISC.CLEAR_RESTEER_CYCLES'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP']) maxval = None def compute(self, EV): try: self.val = Mispred_Clears_Fraction(self, EV, 4) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Mispredicts_Resteers zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage.""" class Clears_Resteers: name = "Clears_Resteers" domain = "Clocks" area = "FE" level = 4 htoff = False sample = ['INT_MISC.CLEAR_RESTEER_CYCLES'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'MachineClears']) maxval = None def 
compute(self, EV): try: self.val = (1 - Mispred_Clears_Fraction(self, EV, 4)) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Clears_Resteers zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears.""" class Unknown_Branches: name = "Unknown_Branches" domain = "Clocks" area = "FE" level = 4 htoff = False sample = ['FRONTEND_RETIRED.UNKNOWN_BRANCH'] errcount = 0 sibling = None metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("INT_MISC.UNKNOWN_BRANCH_CYCLES", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Unknown_Branches zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches""" class MS_Switches: name = "MS_Switches" domain = "Clocks_Estimated" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.MS_FLOWS'] errcount = 0 sibling = None metricgroup = frozenset(['FetchLat', 'MicroSeq']) maxval = 1.0 def compute(self, EV): try: self.val = MS_Switches_Cost * EV("UOPS_RETIRED.MS:c1:e1", 3) / Retire_Fraction(self, EV, 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MS_Switches zero division") return self.val desc = """ This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). 
Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals.""" class LCP: name = "LCP" domain = "Clocks" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("DECODE.LCP", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "LCP zero division") return self.val desc = """ This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this.""" class DSB_Switches: name = "DSB_Switches" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.DSB_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['DSBmiss', 'FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DSB_Switches zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). 
Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.. See section 'Optimization for Decoded Icache' in Optimization Manual:. http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html""" class Fetch_Bandwidth: name = "Fetch_Bandwidth" domain = "Slots" area = "FE" level = 2 htoff = False sample = ['FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_2:pp'] errcount = 0 sibling = None metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = max(0 , self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)) self.thresh = (self.val > 0.2) except ZeroDivisionError: handle_error(self, "Fetch_Bandwidth zero division") return self.val desc = """ This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend.""" class MITE: name = "MITE" domain = "Slots_Estimated" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.ANY_DSB_MISS'] errcount = 0 sibling = None metricgroup = frozenset(['DSBmiss', 'FetchBW']) maxval = None def compute(self, EV): try: self.val = (EV("IDQ.MITE_CYCLES_ANY", 3) - EV("IDQ.MITE_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2 self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MITE zero division") return self.val desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. 
For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.. Consider tuning codegen of 'small hotspots' that can fit in DSB. Read about 'Decoded ICache' in Optimization Manual:. http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html""" class Decoder0_Alone: name = "Decoder0_Alone" domain = "Slots_Estimated" area = "FE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['DSBmiss', 'FetchBW']) maxval = None def compute(self, EV): try: self.val = (EV("INST_DECODED.DECODERS:c1", 4) - EV("INST_DECODED.DECODERS:c2", 4)) / CORE_CLKS(self, EV, 4) / 2 self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Decoder0_Alone zero division") return self.val desc = """ This metric represents fraction of cycles where decoder-0 was the only active decoder""" class DSB: name = "DSB" domain = "Slots_Estimated" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['DSB', 'FetchBW']) maxval = None def compute(self, EV): try: self.val = (EV("IDQ.DSB_CYCLES_ANY", 3) - EV("IDQ.DSB_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2 self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DSB zero division") return self.val desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. 
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.""" class LSD: name = "LSD" domain = "Slots_Estimated" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['FetchBW', 'LSD']) maxval = None def compute(self, EV): try: self.val = (EV("LSD.CYCLES_ACTIVE", 3) - EV("LSD.CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2 self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "LSD zero division") return self.val desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.""" class Bad_Speculation: name = "Bad_Speculation" domain = "Slots" area = "BAD" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['TmaL1']) maxval = None def compute(self, EV): try: self.val = max(1 -(self.Frontend_Bound.compute(EV) + self.Backend_Bound.compute(EV) + self.Retiring.compute(EV)) , 0 ) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Bad_Speculation zero division") return self.val desc = """ This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss- predicted branches are categorized under Bad Speculation category. 
Incorrect data speculation followed by Memory Ordering Nukes is another example.""" class Branch_Mispredicts: name = "Branch_Mispredicts" domain = "Slots" area = "BAD" level = 2 htoff = False sample = ['TOPDOWN.BR_MISPREDICT_SLOTS'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = (EV("PERF_METRICS.BRANCH_MISPREDICTS", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("TOPDOWN.BR_MISPREDICT_SLOTS", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Branch_Mispredicts zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.. Using profile feedback in the compiler may help. Please see the Optimization Manual for general strategies for addressing branch misprediction issues.. 
http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Other_Mispredicts: name = "Other_Mispredicts" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'BrMispredicts']) maxval = None def compute(self, EV): try: self.val = max(self.Branch_Mispredicts.compute(EV) * (1 - EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) / (EV("INT_MISC.CLEARS_COUNT", 3) - EV("MACHINE_CLEARS.COUNT", 3))) , 0.0001 ) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Other_Mispredicts zero division") return self.val desc = """ This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).""" class Machine_Clears: name = "Machine_Clears" domain = "Slots" area = "BAD" level = 2 htoff = False sample = ['MACHINE_CLEARS.COUNT'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = max(0 , self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Machine_Clears zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.. See \"Memory Disambiguation\" in Optimization Manual and:. 
https://software.intel.com/sites/default/files/ m/d/4/1/d/8/sma.pdf""" class Other_Nukes: name = "Other_Nukes" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'Machine_Clears']) maxval = None def compute(self, EV): try: self.val = max(self.Machine_Clears.compute(EV) * (1 - EV("MACHINE_CLEARS.MEMORY_ORDERING", 3) / EV("MACHINE_CLEARS.COUNT", 3)) , 0.0001 ) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Other_Nukes zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.""" class Backend_Bound: name = "Backend_Bound" domain = "Slots" area = "BE" level = 1 htoff = False sample = ['TOPDOWN.BACKEND_BOUND_SLOTS'] errcount = 0 sibling = None metricgroup = frozenset(['BvOB', 'TmaL1']) maxval = None def compute(self, EV): try: self.val = (EV("PERF_METRICS.BACKEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) if topdown_use_fixed else EV("TOPDOWN.BACKEND_BOUND_SLOTS", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.2) except ZeroDivisionError: handle_error(self, "Backend_Bound zero division") return self.val desc = """ This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. 
Backend Bound is further divided into two main categories: Memory Bound and Core Bound.""" class Memory_Bound: name = "Memory_Bound" domain = "Slots" area = "BE/Mem" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Backend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = (EV("PERF_METRICS.MEMORY_BOUND", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("TOPDOWN.MEMORY_BOUND_SLOTS", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Memory_Bound zero division") return self.val desc = """ This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).""" class L1_Bound: name = "L1_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L1_HIT:pp', 'MEM_LOAD_RETIRED.FB_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = max((EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3) - EV("MEMORY_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3) , 0 ) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L1_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. 
However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache.""" class DTLB_Load: name = "DTLB_Load" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.STLB_MISS_LOADS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMT', 'MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = min(Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT:c1", 4) + EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 4) , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("MEMORY_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DTLB_Load zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). 
This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss..""" class Load_STLB_Hit: name = "Load_STLB_Hit" domain = "Clocks_Estimated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = self.DTLB_Load.compute(EV) - self.Load_STLB_Miss.compute(EV) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Load_STLB_Hit zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)""" class Load_STLB_Miss: name = "Load_STLB_Miss" domain = "Clocks_Calculated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 5) / CLKS(self, EV, 5) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Load_STLB_Miss zero division") return self.val desc = """ This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk""" class Store_Fwd_Blk: name = "Store_Fwd_Blk" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = 1.0 def compute(self, EV): try: self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Fwd_Blk zero division") return self.val desc = """ This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. 
To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.""" class L1_Hit_Latency: name = "L1_Hit_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_RETIRED.L1_HIT'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat']) maxval = 1.0 def compute(self, EV): try: self.val = min(2 *(EV("MEM_INST_RETIRED.ALL_LOADS", 4) - EV("MEM_LOAD_RETIRED.FB_HIT", 4) - EV("MEM_LOAD_RETIRED.L1_MISS", 4)) * Dependent_Loads_Weight(self, EV, 4) / 100 , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("MEMORY_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L1_Hit_Latency zero division") return self.val desc = """ This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. 
The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example.""" class Lock_Latency: name = "Lock_Latency" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.LOCK_LOADS'] errcount = 0 sibling = None metricgroup = frozenset(['Offcore']) maxval = 1.0 def compute(self, EV): try: self.val = (16 * max(0 , EV("MEM_INST_RETIRED.LOCK_LOADS", 4) - EV("L2_RQSTS.ALL_RFO", 4)) + Mem_Lock_St_Fraction(self, EV, 4) * (Mem_L2_Store_Cost * EV("L2_RQSTS.RFO_HIT", 4) + ORO_Demand_RFO_C1(self, EV, 4))) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Lock_Latency zero division") return self.val desc = """ This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them.""" class Split_Loads: name = "Split_Loads" domain = "Clocks_Calculated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.SPLIT_LOADS:pp'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = 1.0 def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Split_Loads zero division") return self.val desc = """ This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. . Consider aligning data or hot structure fields. 
See the Optimization Manual for more details""" class FB_Full: name = "FB_Full" domain = "Clocks_Calculated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW']) maxval = None def compute(self, EV): try: self.val = EV("L1D_PEND_MISS.FB_FULL", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.3) except ZeroDivisionError: handle_error(self, "FB_Full zero division") return self.val desc = """ This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory).. See $issueBW and $issueSL hints. Avoid software prefetches if indeed memory BW limited.""" class L2_Bound: name = "L2_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L2_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = (EV("MEMORY_ACTIVITY.STALLS_L1D_MISS", 3) - EV("MEMORY_ACTIVITY.STALLS_L2_MISS", 3)) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L2_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. 
L1 misses/L2 hits) can improve the latency and increase performance."""

# L3_Bound: load stalls serviced by the L3 (stalled-on-L2-miss minus
# stalled-on-L3-miss cycles) as a fraction of thread clocks.
class L3_Bound:
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("MEMORY_ACTIVITY.STALLS_L2_MISS", 3) - EV("MEMORY_ACTIVITY.STALLS_L3_MISS", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """ This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance."""

# Contested_Accesses: estimated cost of cross-core snoops (HITM-forward and
# snoop-miss) on demand loads, as latency deltas over an L2 hit, scaled by
# the fill-buffer factor.
class Contested_Accesses:
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD', 'MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = ((Mem_XSNP_HitM_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HITM(self, EV, 4) + (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_MISS(self, EV, 4)) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core.
Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing."""

# Data_Sharing: clean-snoop (XSNP_HIT) sharing cost over an L2 hit,
# fill-buffer scaled.
class Data_Sharing:
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HIT(self, EV, 4) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance."""

# L3_Hit_Latency: unloaded (no-snoop) L3-hit latency cost over an L2 hit.
class L3_Hit_Latency:
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_XSNP_None_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance.
Note the value of this node may overlap with its siblings."""

# SQ_Full: cycles the Super Queue / L2-miss path was saturated.
class SQ_Full:
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("XQ.FULL_CYCLES", 4) + EV("L1D_PEND_MISS.L2_STALLS", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """ This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)."""

# DRAM_Bound: delegates to the MEM_Bound_Ratio helper (defined elsewhere
# in this file).
class DRAM_Bound:
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = MEM_Bound_Ratio(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """ This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance."""

# MEM_Bandwidth: cycles with high outstanding demand-data-read occupancy
# (bandwidth-bound heuristic via ORO_DRD_BW_Cycles helper).
class MEM_Bandwidth:
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM).
The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that).. Improve data accesses to reduce cacheline transfers from/to memory. Examples: 1) Consume all bytes of a each cacheline before it is evicted (e.g. reorder structure elements and split non-hot ones), 2) merge computed-limited with BW-limited loops, 3) NUMA optimizations in multi-socket system. Note: software prefetches will not help BW-limited application.."""

# MEM_Latency: any-outstanding-demand-read cycles minus the sibling
# MEM_Bandwidth component (sibling object is wired up externally).
class MEM_Latency:
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that).. Improve data accesses or interleave them with compute.
Examples: 1) Data layout re-structuring, 2) Software Prefetches (also through the compiler).."""

# Store_Bound: cycles execution was bound on stores.
class Store_Bound:
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_INST_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.BOUND_ON_STORES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """ This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck."""

# Store_Latency: L2-hit store cycles plus non-locked demand-RFO occupancy.
class Store_Latency:
    name = "Store_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Latency zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full).
Consider to avoid/reduce unnecessary (or easily load-able/computable) memory store."""

# False_Sharing: HITM-snoop cost applied per RFO that hit L3 with a HITM.
class False_Sharing:
    name = "False_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "False_Sharing zero division")
        return self.val
    desc = """ This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. . False Sharing can be easily avoided by padding to make Logical Processors access different lines."""

# Split_Stores: cache-line-crossing stores per core clock (note: Core
# clocks domain, unlike most siblings which use thread CLKS).
class Split_Stores:
    name = "Split_Stores"
    domain = "Core_Utilization"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("MEM_INST_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Stores zero division")
        return self.val
    desc = """ This metric represents rate of split store accesses.
Consider aligning your data to the 64-byte cache line granularity."""

# Streaming_Stores: estimated 9-cycle cost per streaming-write response;
# only computed when the DS constant is set, otherwise reported as 0.
class Streaming_Stores:
    name = "Streaming_Stores"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.STREAMING_WR.ANY_RESPONSE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBW', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 9 * EV("OCR.STREAMING_WR.ANY_RESPONSE", 4) / CLKS(self, EV, 4) if DS else 0
            # NOTE(review): bare EV() reference below appears to exist only so the
            # event is still requested for collection when DS is unset (same idiom
            # as in Few_Uops_Executed_Threshold) -- verify against the EV scheduler.
            EV("OCR.STREAMING_WR.ANY_RESPONSE", 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Streaming_Stores zero division")
        return self.val
    desc = """ This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck."""

# DTLB_Store: first-level store-TLB miss cost = fixed STLB-hit cost per
# STLB hit plus page-walk active cycles, per core clock.
class DTLB_Store:
    name = "DTLB_Store"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT:c1", 4) + EV("DTLB_STORE_MISSES.WALK_ACTIVE", 4)) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Store zero division")
        return self.val
    desc = """ This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page.
Try using larger page sizes for large amounts of frequently- used data."""

# Store_STLB_Hit: parent DTLB_Store minus the page-walk (STLB miss) part.
class Store_STLB_Hit:
    name = "Store_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = self.DTLB_Store.compute(EV) - self.Store_STLB_Miss.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Hit zero division")
        return self.val
    desc = """ This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second- level TLB (STLB)"""

# Store_STLB_Miss: store page-walk active cycles per core clock.
class Store_STLB_Miss:
    name = "Store_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("DTLB_STORE_MISSES.WALK_ACTIVE", 5) / CORE_CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Miss zero division")
        return self.val
    desc = """ This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk"""

# Core_Bound: Backend_Bound minus Memory_Bound, clamped at zero.
class Core_Bound:
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """ This metric represents fraction of slots where Core non- memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound.
Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).. Tip: consider Port Saturation analysis as next step."""

# Divider: cycles the divide/sqrt unit was active.
class Divider:
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['ARITH.DIVIDER_ACTIVE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("ARITH.DIV_ACTIVE", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """ This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication."""

# Serializing_Operation: scoreboard stalls plus the C02_WAIT child.
class Serializing_Operation:
    name = "Serializing_Operation"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['RESOURCE_STALLS.SCOREBOARD']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("RESOURCE_STALLS.SCOREBOARD", 3) / CLKS(self, EV, 3) + self.C02_WAIT.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Serializing_Operation zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU issue- pipeline was stalled due to serializing operations.
Instructions like CPUID; WRMSR or LFENCE serialize the out- of-order execution which may limit performance."""

# Slow_Pause: cycles stalled on PAUSE instructions.
class Slow_Pause:
    name = "Slow_Pause"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['CPU_CLK_UNHALTED.PAUSE_INST']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.PAUSE", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Slow_Pause zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions."""

# C01_WAIT: cycles spent in the C0.1 optimized-wait state.
class C01_WAIT:
    name = "C01_WAIT"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['C0Wait'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.C01", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "C01_WAIT zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings)."""

# C02_WAIT: cycles spent in the C0.2 optimized-wait state.
class C02_WAIT:
    name = "C02_WAIT"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['C0Wait'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.C02", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "C02_WAIT zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings)."""

# Memory_Fence: fixed 13-cycle cost per retired LFENCE.
class Memory_Fence:
    name = "Memory_Fence"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("MISC2_RETIRED.LFENCE", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Fence zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions."""

# Ports_Utilization: core-bound cycles when the divider is not the dominant
# stall, else falls back to the few-uops-executed threshold formula.
class Ports_Utilization:
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Core_Bound_Cycles(self, EV, 3) / CLKS(self, EV, 3) if (EV("ARITH.DIV_ACTIVE", 3)<(EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) - EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3))) else Few_Uops_Executed_Threshold(self, EV, 3) / CLKS(self, EV, 3)
            # NOTE(review): the bare EV() references below appear to exist only to
            # keep these events requested for collection regardless of which
            # conditional arm is taken -- verify against the EV scheduler.
            EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3)
            EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3)
            EV("ARITH.DIV_ACTIVE", 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations..
Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop."""

# Ports_Utilized_0: zero-ports-utilized cycles (exe-bound-0-ports plus RS
# empty beyond scoreboard stalls), scaled by the non-load stall fraction.
class Ports_Utilized_0:
    name = "Ports_Utilized_0"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("EXE_ACTIVITY.EXE_BOUND_0_PORTS", 4) + max(EV("RS.EMPTY:u1", 4) - EV("RESOURCE_STALLS.SCOREBOARD", 4) , 0)) / CLKS(self, EV, 4) * (EV("CYCLE_ACTIVITY.STALLS_TOTAL", 4) - EV("EXE_ACTIVITY.BOUND_ON_LOADS", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_0 zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.. Check assembly view and Appendix C in Optimization Manual to find out instructions with say 5 or more cycles latency.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html"""

# Mixing_Vectors: fixed 160-cycle penalty per SSE/AVX transition assist.
# Threshold does not gate on the parent (standalone flag).
class Mixing_Vectors:
    name = "Mixing_Vectors"
    domain = "Clocks"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 160 * EV("ASSISTS.SSE_AVX_MIX", 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error(self, "Mixing_Vectors zero division")
        return self.val
    desc = """ This metric estimates penalty in terms of percentage of cycles. Usually a Mixing_Vectors over 5% is worth investigating.
Read more in Appendix B1 of the Optimizations Guide for this topic."""

# Ports_Utilized_1: cycles with exactly 1 uop executed across all ports.
class Ports_Utilized_1:
    name = "Ports_Utilized_1"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.1_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.1_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_1 zero division")
        return self.val
    desc = """ This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful."""

# Ports_Utilized_2: cycles with exactly 2 uops executed across all ports.
class Ports_Utilized_2:
    name = "Ports_Utilized_2"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.2_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.2_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_2 zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).
Loop Vectorization -most compilers feature auto- Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop."""

# Ports_Utilized_3m: cycles with 3 or more uops executed.
class Ports_Utilized_3m:
    name = "Ports_Utilized_3m"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['UOPS_EXECUTED.CYCLES_GE_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_EXECUTED.CYCLES_GE_3", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.4) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_3m zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)."""

# ALU_Op_Utilization: uops dispatched to ALU ports 0/1/5_11/6, averaged
# over 5 * core clocks. Threshold does not gate on the parent.
class ALU_Op_Utilization:
    name = "ALU_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_0", 5) + EV("UOPS_DISPATCHED.PORT_1", 5) + EV("UOPS_DISPATCHED.PORT_5_11", 5) + EV("UOPS_DISPATCHED.PORT_6", 5)) / (5 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.4)
        except ZeroDivisionError:
            handle_error(self, "ALU_Op_Utilization zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations."""

# Port_0: per-port utilization of execution port 0.
class Port_0:
    name = "Port_0"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_0']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_0", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_0 zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU
dispatched uops on execution port 0 ALU and 2nd branch"""

# Port_1: per-port utilization of execution port 1.
class Port_1:
    name = "Port_1"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_1']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_1", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_1 zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)"""

# Port_6: per-port utilization of execution port 6.
class Port_6:
    name = "Port_6"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_6']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_6", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_6 zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 Primary Branch and simple ALU"""

# Load_Op_Utilization: load-port uops averaged over 3 * core clocks.
class Load_Op_Utilization:
    name = "Load_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_2_3_10']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_2_3_10", 5) / (3 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Load_Op_Utilization zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations"""

# Store_Op_Utilization: store-data + store-address port uops averaged over
# 4 * core clocks.
class Store_Op_Utilization:
    name = "Store_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_7_8']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_4_9", 5) + EV("UOPS_DISPATCHED.PORT_7_8", 5)) / (4 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Store_Op_Utilization zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations"""

# Retiring: top-level retired-slots fraction; uses the fixed PERF_METRICS
# MSR path when topdown_use_fixed is set, else the generic-counter path.
class Retiring:
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("PERF_METRICS.RETIRING", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) if topdown_use_fixed else EV("UOPS_RETIRED.SLOTS", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """ This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. . A high Retiring value for non-vectorized code may be a good hint for programmer to consider vectorizing his code.
Doing so essentially lets more computations be done without significantly increasing number of instructions thus improving the performance."""

# Light_Operations: Retiring minus Heavy_Operations, clamped at zero.
# Threshold does not gate on the parent.
class Light_Operations:
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """ This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. .. Focus on techniques that reduce instruction count or result in more efficient instructions generation such as vectorization."""

# FP_Arith: sum of the x87/scalar/vector child nodes (Uops domain; may
# exceed the parent per the desc below).
class FP_Arith:
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """ This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting."""

# X87_Use: Retiring scaled by the executed-uops share that was x87.
class X87_Use:
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Retiring.compute(EV) * EV("UOPS_EXECUTED.X87", 4) / EV("UOPS_EXECUTED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """ This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.. Tip: consider compiler flags to generate newer AVX (or SSE) instruction sets; which typically perform better and feature vectors."""

# FP_Scalar: scalar FP arithmetic uops as a share of retired slots.
class FP_Scalar:
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """ This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting..
Investigate what limits (compiler) generation of vector code.""" class FP_Vector: name = "FP_Vector" domain = "Uops" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector zero division") return self.val desc = """ This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting.. Check if vector width is expected""" class FP_Vector_128b: name = "FP_Vector_128b" domain = "Uops" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector_128b zero division") return self.val desc = """ This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting.. 
Try to exploit wider vector length"""


# NOTE(review): several events referenced below (FP_ARITH_INST_RETIRED.*,
# INT_VEC_RETIRED.*, PERF_METRICS.*) post-date Haswell; this generated table
# appears to target a newer core than the filename suggests -- verify the
# generator inputs.

# TMA level-5 node: fraction of retired uops that are 256-bit wide FP vector
# operations (packed double + packed single, normalized by retired slots).
class FP_Vector_256b:
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            # Only flag when the parent node is itself above threshold.
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting..
Try to exploit wider vector length"""

# TMA level-3 node: sum of the Int_Vector_128b/256b children. Uses the
# "Uops" count-domain, so it can legitimately exceed its parent's value.
class Int_Operations:
    name = "Int_Operations"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            # Aggregates the two child nodes (wired up elsewhere as attributes).
            self.val = self.Int_Vector_128b.compute(EV) + self.Int_Vector_256b.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Int_Operations zero division")
        return self.val
    desc = """
This metric represents overall Integer (Int) select operations
fraction the CPU has executed (retired). Vector/Matrix Int
operations and shuffles are counted.
Note this metric's value may exceed its parent due to use of
\"Uops\" CountDomain."""

# TMA level-4 node: 128-bit vector integer ADD/SUB/SAD or VNNI uops fraction.
class Int_Vector_128b:
    name = "Int_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'IntVector', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("INT_VEC_RETIRED.ADD_128", 4) + EV("INT_VEC_RETIRED.VNNI_128", 4)) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Int_Vector_128b zero division")
        return self.val
    desc = """
This metric represents 128-bit vector Integer ADD/SUB/SAD or
VNNI (Vector Neural Network Instructions) uops fraction the
CPU has retired."""

# TMA level-4 node: 256-bit vector integer ADD/SUB/SAD/MUL or VNNI uops fraction.
class Int_Vector_256b:
    name = "Int_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'IntVector', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("INT_VEC_RETIRED.ADD_256", 4) + EV("INT_VEC_RETIRED.MUL_256", 4) + EV("INT_VEC_RETIRED.VNNI_256", 4)) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Int_Vector_256b zero division")
        return self.val
    desc = """
This metric represents 256-bit vector Integer
ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions)
uops fraction the CPU has retired."""

# TMA level-3 node: slots fraction retiring memory (load/store) uops,
# scaled by the Light_Operations parent share.
class Memory_Operations:
    name = "Memory_Operations"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("MEM_UOP_RETIRED.ANY", 3) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring memory operations -- uops for memory load or store
accesses."""

# TMA level-3 node: slots fraction retiring macro-fused instruction pairs
# (e.g. CMP+JCC), scaled by the Light_Operations parent share.
class Fused_Instructions:
    name = "Fused_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.MACRO_FUSED", 3) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fused_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring fused instructions -- where one uop can represent
multiple contiguous instructions. CMP+JCC or DEC+JCC are
common examples of legacy fusions. {}. See section
'Optimizing for Macro-fusion' in Optimization Manual:"""

# TMA level-3 node: slots fraction retiring branches that were NOT fused
# (all branches minus macro-fused ones).
class Non_Fused_Branches:
    name = "Non_Fused_Branches"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * (EV("BR_INST_RETIRED.ALL_BRANCHES", 3) - EV("INST_RETIRED.MACRO_FUSED", 3)) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Non_Fused_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring branch instructions that were not fused. Non-
conditional branches like direct JMP or CALL would count here.
Can be used to examine fusible conditional jumps that were
not fused."""

# TMA level-3 node: residual light uops not covered by sibling nodes
# (clamped at zero so rounding of siblings cannot drive it negative).
class Other_Light_Ops:
    name = "Other_Light_Ops"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Light_Operations.compute(EV) - Light_Ops_Sum(self, EV, 3))
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Light_Ops zero division")
        return self.val
    desc = """
This metric represents the remaining light uops fraction the
CPU has executed - remaining means not covered by other
sibling nodes. May undercount due to FMA double counting"""

# TMA level-4 node: slots fraction retiring NOP instructions.
class Nop_Instructions:
    name = "Nop_Instructions"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = ['INST_RETIRED.NOP']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.NOP", 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Nop_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring NOP (no op) instructions. Compilers often use NOPs
for certain address alignments - e.g. start address of a
function or loop body.. Improve Codegen by correctly placing
NOPs outside hot sections (e.g. outside loop body)."""

# TMA level-4 node: slots fraction retiring 256-bit vector shuffle uops.
class Shuffles_256b:
    name = "Shuffles_256b"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INT_VEC_RETIRED.SHUFFLES", 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Shuffles_256b zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring Shuffle operations of 256-bit vector size (FP or
Integer). Shuffles may incur slow cross \"vector lane\" data
transfers."""

# TMA level-2 node: slots fraction retiring heavy-weight (multi-uop /
# microcoded) operations. Uses the fixed PERF_METRICS counters when
# topdown_use_fixed is set, otherwise the generic UOPS_RETIRED.HEAVY event.
class Heavy_Operations:
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['UOPS_RETIRED.HEAVY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("PERF_METRICS.HEAVY_OPERATIONS", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("UOPS_RETIRED.HEAVY", 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that require
two or more uops or micro-coded sequences. This highly-
correlates with the uop length of these instructions/sequences.
."""

# TMA level-3 node: heavy operations not delivered by the MS, i.e.
# multi-uop instructions handled by the regular decoders (clamped at 0).
class Few_Uops_Instructions:
    name = "Few_Uops_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Heavy_Operations.compute(EV) - self.Microcode_Sequencer.compute(EV))
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Few_Uops_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring instructions that that are decoder into two or up to
five uops. This highly-correlates with the number of uops in
such instructions."""

# TMA level-3 node: slots fraction of uops delivered by the Microcode
# Sequencer (MS) -- CISC instructions and assist flows.
class Microcode_Sequencer:
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = ['UOPS_RETIRED.MS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_RETIRED.MS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was retiring
uops fetched by the Microcode Sequencer (MS) unit. The MS is
used for CISC instructions not supported by the default
decoders (like repeat move strings; or CPUID); or by microcode
assists used to address some operation modes (like in Floating
Point assists).
These cases can often be avoided.."""

# TMA level-4 node: estimated slots cost of microcode assists; each assist
# is charged an average fixed cost (Avg_Assist_Cost, defined at file top).
class Assists:
    name = "Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 4
    htoff = False
    sample = ['ASSISTS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Avg_Assist_Cost * EV("ASSISTS.ANY", 4) / SLOTS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops
delivered by the Microcode_Sequencer as a result of Assists.
Assists are long sequences of uops that are required in
certain corner-cases for operations that cannot be handled
natively by the execution pipeline. For example; when working
with very small floating point values (so-called Denormals);
the FP units are not set up to perform these operations
natively. Instead; a sequence of instructions to perform the
computation on the Denormals is injected into the pipeline.
Since these microcode sequences might be dozens of uops long;
Assists can be extremely deleterious to performance and they
can be avoided in many cases."""

# TMA level-5 node: estimated slots cost of page-fault assists
# (fixed weight of 99 slots per ASSISTS.PAGE_FAULT event).
class Page_Faults:
    name = "Page_Faults"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 99 * EV("ASSISTS.PAGE_FAULT", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error(self, "Page_Faults zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of slots the CPU
retired uops as a result of handing Page Faults. A Page Fault
may apply on first application access to a memory page.
Note operating system handling of page faults accounts for the
majority of its cost."""

# TMA level-5 node: estimated slots cost of floating-point (denormal)
# assists (fixed weight of 30 slots per ASSISTS.FP event).
class FP_Assists:
    name = "FP_Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 30 * EV("ASSISTS.FP", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "FP_Assists zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of slots the CPU
retired uops as a result of handing Floating Point (FP)
Assists. FP Assist may apply when working with very small
floating point values (so-called Denormals).. Consider DAZ
(Denormals Are Zero) and/or FTZ (Flush To Zero) options in
your compiler; \"-ffast-math\" with -O2 in GCC for example.
This option may improve performance if the denormal values are
not critical in your application. Also note that the DAZ and
FTZ modes are not compatible with the IEEE Standard 754..
https://www.intel.com/content/www/us/en/develop/docume
ntation/vtune-help/top/reference/cpu-metrics-reference/bad-
speculation-back-end-bound-pipeline-slots/fp-assists.html"""

# TMA level-5 node: estimated slots cost of SSE<->AVX transition assists
# (fixed weight of 63 slots per ASSISTS.SSE_AVX_MIX event).
class AVX_Assists:
    name = "AVX_Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 63 * EV("ASSISTS.SSE_AVX_MIX", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "AVX_Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops
as a result of handing SSE to AVX* or AVX* to SSE transition
Assists."""

# TMA level-4 node: MS uops that were NOT assists, i.e. genuine CISC
# instruction flows (clamped at zero).
class CISC:
    name = "CISC"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = ['FRONTEND_RETIRED.MS_FLOWS']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "CISC zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU retired uops
originated from CISC (complex instruction set computer)
instruction. A CISC instruction has multiple uops that are
required to perform the instruction's functionality as in the
case of read-modify-write as an example.
Since these instructions require multiple uops they may or may
not imply sub-optimal use of machine resources."""

# Info.Bottleneck metrics below: each wraps a helper formula of the same
# name (defined elsewhere in this file) and reports a "Scaled_Slots"
# pipeline-cost total; handle_error_metric zeroes the value on divide-by-0.

# Total pipeline cost attributable to branch misprediction.
class Metric_Mispredictions:
    name = "Mispredictions"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts', 'BvMP'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Mispredictions(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Mispredictions zero division")
    desc = """
Total pipeline cost of Branch Misprediction related
bottlenecks"""

# Total pipeline cost of i-cache/i-TLB/BTB misses from large code footprint.
class Metric_Big_Code:
    name = "Big_Code"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBC', 'BigFootprint', 'Fed', 'Frontend', 'IcMiss', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Big_Code(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Big_Code zero division")
    desc = """
Total pipeline cost of instruction fetch related bottlenecks
by large code footprint programs (i-side cache; TLB and BTB
misses)"""

# Total pipeline cost of front-end fetch bandwidth limits.
class Metric_Instruction_Fetch_BW:
    name = "Instruction_Fetch_BW"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvFB', 'Fed', 'FetchBW', 'Frontend'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instruction_Fetch_BW(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Instruction_Fetch_BW zero division")
    desc = """
Total pipeline cost of instruction fetch bandwidth related
bottlenecks (when the front-end could not sustain operations
delivery to the back-end)"""

# Total pipeline cost of memory/cache bandwidth saturation.
class Metric_Cache_Memory_Bandwidth:
    name = "Cache_Memory_Bandwidth"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMB', 'Mem', 'MemoryBW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cache_Memory_Bandwidth(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Bandwidth zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Bandwidth
related bottlenecks"""

# Total pipeline cost of memory/cache access latency.
class Metric_Cache_Memory_Latency:
    name = "Cache_Memory_Latency"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvML', 'Mem', 'MemoryLat', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cache_Memory_Latency(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Latency zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Latency
related bottlenecks"""

# Total pipeline cost of data-side address translation (DTLB/STLB).
class Metric_Memory_Data_TLBs:
    name = "Memory_Data_TLBs"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMT', 'Mem', 'MemoryTLB', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Memory_Data_TLBs(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Data_TLBs zero division")
    desc = """
Total pipeline cost of Memory Address Translation related
bottlenecks (data-side TLBs)"""

# Total pipeline cost of cross-processor data sharing/coherency.
class Metric_Memory_Synchronization:
    name = "Memory_Synchronization"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMS', 'Mem', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Memory_Synchronization(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Synchronization zero division")
    desc = """
Total pipeline cost of Memory Synchronization related
bottlenecks (data transfers and coherency updates across
processors)"""

# Estimated total pipeline cost of compute-bound execution.
class Metric_Compute_Bound_Est:
    name = "Compute_Bound_Est"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvCB', 'Cor'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Compute_Bound_Est(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Compute_Bound_Est zero division")
    desc = """
Total pipeline cost when the execution is compute-bound - an
estimation. Covers Core Bound when High ILP as well as when
long-latency execution units are busy."""

# Total pipeline cost of irregular execution (assists, imbalance, etc.).
class Metric_Irregular_Overhead:
    name = "Irregular_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BvIO', 'Cor', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Irregular_Overhead(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Irregular_Overhead zero division")
    desc = """
Total pipeline cost of irregular execution (e.g. FP-assists in
HPC, Wait time with work imbalance multithreaded workloads,
overhead in system services or virtualized environments)"""

# Catch-all for back-end bottlenecks not itemized above.
class Metric_Other_Bottlenecks:
    name = "Other_Bottlenecks"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvOB', 'Cor', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Other_Bottlenecks(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Other_Bottlenecks zero division")
    desc = """
Total pipeline cost of remaining bottlenecks in the back-
end. Examples include data-dependencies (Core Bound when Low
ILP) and other unlisted memory-related stalls."""

# Total pipeline cost of program control-flow instructions.
class Metric_Branching_Overhead:
    name = "Branching_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBO', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Branching_Overhead(self, EV, 0)
            self.thresh = (self.val > 5)
        except ZeroDivisionError:
            handle_error_metric(self, "Branching_Overhead zero division")
    desc = """
Total pipeline cost of instructions used for program
control-flow - a subset of the Retiring category in TMA.
Examples include function calls; loops and alignments.
(A lower bound). Consider Loop Unrolling or function inlining
optimizations"""

# Retiring slots that are neither branching nor irregular overhead.
class Metric_Useful_Work:
    name = "Useful_Work"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvUW', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Useful_Work(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Useful_Work zero division")
    desc = """
Total pipeline cost of \"useful operations\" - the portion of
Retiring category not covered by Branching_Overhead nor
Irregular_Overhead."""

# Probability (0..1) that Core Bound is masked by SMT profiling artifacts.
class Metric_Core_Bound_Likely:
    name = "Core_Bound_Likely"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Botlnk.L0"
    metricgroup = frozenset(['Cor', 'SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Core_Bound_Likely(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Bound_Likely zero division")
    desc = """
Probability of Core Bound bottleneck hidden by SMT-profiling
artifacts.
Tip: consider analysis with SMT disabled"""

# Info.Thread metrics: per-logical-processor pipeline ratios.

# Instructions Per Cycle; always reported (thresh True).
class Metric_IPC:
    name = "IPC"
    domain = "Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Ret', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IPC zero division")
    desc = """
Instructions Per Cycle (per Logical Processor)"""

# Uops per instruction; > 1.05 hints at heavy uop expansion.
class Metric_UopPI:
    name = "UopPI"
    domain = "Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UopPI(self, EV, 0)
            self.thresh = (self.val > 1.05)
        except ZeroDivisionError:
            handle_error_metric(self, "UopPI zero division")
    desc = """
Uops Per Instruction"""

# Uops per taken branch; low values flag fetch-bandwidth-unfriendly code.
class Metric_UpTB:
    name = "UpTB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 1.5
        except ZeroDivisionError:
            handle_error_metric(self, "UpTB zero division")
    desc = """
Uops per taken branch"""

# Cycles per instruction (inverse of IPC).
class Metric_CPI:
    name = "CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPI zero division")
    desc = """
Cycles Per Instruction (per Logical Processor)"""

# Active clock count for this logical processor.
class Metric_CLKS:
    name = "CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CLKS zero division")
    desc = """
Per-Logical Processor actual clocks when the Logical Processor
is active."""

# Total issue-pipeline slots (TMA denominator).
class Metric_SLOTS:
    name = "SLOTS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SLOTS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SLOTS zero division")
    desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""

# Fraction of the physical core's slots used by this logical processor.
class Metric_Slots_Utilization:
    name = "Slots_Utilization"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Slots_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Slots_Utilization zero division")
    desc = """
Fraction of Physical Core issue-slots utilized by this Logical
Processor"""

# Executed-to-issued uop ratio (micro-fusion vs rename-stage execution).
class Metric_Execute_per_Issue:
    name = "Execute_per_Issue"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Cor', 'Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute_per_Issue(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute_per_Issue zero division")
    desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high
rate of uop micro-fusions.
Ratio < 1 suggest high rate of \"execute\" at rename stage."""

# Info.Core metrics: per-physical-core ratios (SMT-aware).

# IPC summed across hyper-threads of the physical core.
class Metric_CoreIPC:
    name = "CoreIPC"
    domain = "Core_Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CoreIPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CoreIPC zero division")
    desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""

# FP operations per core cycle.
class Metric_FLOPc:
    name = "FLOPc"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'Flops'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FLOPc(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FLOPc zero division")
    desc = """
Floating Point Operations Per Cycle"""

# Utilization of the non-x87 FP execution units (any precision/width).
class Metric_FP_Arith_Utilization:
    name = "FP_Arith_Utilization"
    domain = "Core_Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FP_Arith_Utilization zero division")
    desc = """
Actual per-core usage of the Floating Point non-X87 execution
units (regardless of precision or vector-width).
Values > 1 are possible due to Fused-Multiply Add use all of
ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common."""

# Average uops executed per cycle when at least one uop executes.
class Metric_ILP:
    name = "ILP"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = ILP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "ILP zero division")
    desc = """
Instruction-Level-Parallelism (average number of uops executed
when there is execution) per thread (logical-
processor)"""

# Uops executed per cycle.
class Metric_EPC:
    name = "EPC"
    domain = "Metric"
    maxval = 20.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = EPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "EPC zero division")
    desc = """
uops Executed per Cycle"""

# Core clocks while any sibling logical processor is active.
class Metric_CORE_CLKS:
    name = "CORE_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CORE_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CORE_CLKS zero division")
    desc = """
Core actual clocks when any Logical Processor is active on the
Physical Core"""

# Info.Inst_Mix metrics: instructions per <event> occurrence-rate ratios;
# lower values mean the event is more frequent.

class Metric_IpLoad:
    name = "IpLoad"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpLoad(self, EV, 0)
            self.thresh = (self.val < 3)
        except ZeroDivisionError:
            handle_error_metric(self, "IpLoad zero division")
    desc = """
Instructions per Load (lower number means higher occurrence
rate).
Tip: reduce memory accesses."""

class Metric_IpStore:
    name = "IpStore"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpStore(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpStore zero division")
    desc = """
Instructions per Store (lower number means higher occurrence
rate). Tip: reduce memory accesses."""

class Metric_IpBranch:
    name = "IpBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpBranch(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpBranch zero division")
    desc = """
Instructions per Branch (lower number means higher occurrence
rate)"""

class Metric_IpCall:
    name = "IpCall"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpCall(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpCall zero division")
    desc = """
Instructions per (near) call (lower number means higher
occurrence rate)"""

class Metric_IpTB:
    name = "IpTB"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpTB(self, EV, 0)
            # Fewer than ~2*width+1 instructions per taken branch stresses fetch.
            self.thresh = self.val < Pipeline_Width * 2 + 1
        except ZeroDivisionError:
            handle_error_metric(self, "IpTB zero division")
    desc = """
Instructions per taken branch"""

class Metric_BpTkBranch:
    name = "BpTkBranch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = BpTkBranch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "BpTkBranch zero division")
    desc = """
Branch instructions per taken branch. . Can be used to
approximate PGO-likelihood for non-loopy codes."""

class Metric_IpFLOP:
    name = "IpFLOP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpFLOP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFLOP zero division")
    desc = """
Instructions per Floating Point (FP) Operation (lower number
means higher occurrence rate). Reference: Tuning Performance
via Metrics with Expectations.
https://doi.org/10.1109/LCA.2019.2916408"""

class Metric_IpArith:
    name = "IpArith"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith zero division")
    desc = """
Instructions per FP Arithmetic instruction (lower number means
higher occurrence rate). Values < 1 are possible due to
intentional FMA double counting. Approximated prior to BDW."""

class Metric_IpArith_Scalar_SP:
    name = "IpArith_Scalar_SP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_SP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_SP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Single-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""

class Metric_IpArith_Scalar_DP:
    name = "IpArith_Scalar_DP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_DP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_DP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Double-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""

class Metric_IpArith_AVX128:
    name = "IpArith_AVX128"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX128(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX128 zero division")
    desc = """
Instructions per FP Arithmetic AVX/SSE 128-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""

class Metric_IpArith_AVX256:
    name = "IpArith_AVX256"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX256(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX256 zero division")
    desc = """
Instructions per FP Arithmetic AVX* 256-bit instruction (lower
number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""

class Metric_IpPause:
    name = "IpPause"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpPause(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IpPause zero division")
    desc = """
Instructions per PAUSE (lower number means higher occurrence
rate)"""

class Metric_IpSWPF:
    name = "IpSWPF"
    domain = "Inst_Metric"
    maxval = 1000
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Prefetches'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpSWPF(self, EV, 0)
            self.thresh = (self.val < 100)
        except ZeroDivisionError:
            handle_error_metric(self, "IpSWPF zero division")
    desc = """
Instructions per Software prefetch instruction (of any type:
NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence
rate)"""

# Absolute retired-instruction count.
class Metric_Instructions:
    name = "Instructions"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Summary', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instructions(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Instructions zero division")
    desc = """
Total number of retired Instructions"""

# Info.Pipeline metrics.

# Average uops retired per retirement-active cycle.
class Metric_Retire:
    name = "Retire"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Pipeline', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Retire(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Retire zero division")
    desc = """
Average number of Uops retired in cycles where at least one
uop has retired."""

# Estimated share of retirement cycles spent in repeat-string instructions.
class Metric_Strings_Cycles:
    name = "Strings_Cycles"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Strings_Cycles(self, EV, 0)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error_metric(self, "Strings_Cycles zero division")
    desc = """
Estimated fraction of retirement-cycles dealing with repeat
instructions"""

# Instructions per microcode-assist invocation.
class Metric_IpAssist:
    name = "IpAssist"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpAssist(self, EV, 0)
            self.thresh = (self.val < 100000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpAssist zero division")
    desc = """
Instructions per a microcode Assist invocation. See Assists
tree node for details (lower number means higher occurrence
rate)"""

# Average uops executed per execution-active cycle (no desc text upstream).
class Metric_Execute:
    name = "Execute"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute zero division")
    desc = """
"""

# Per-cycle uop delivery rates of the three fetch paths (LSD/DSB/MITE).
class Metric_Fetch_LSD:
    name = "Fetch_LSD"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_LSD(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_LSD zero division")
    desc = """
Average number of uops fetched from LSD per cycle"""

class Metric_Fetch_DSB:
    name = "Fetch_DSB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_DSB(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_DSB zero division")
    desc = """
Average number of uops fetched from DSB per cycle"""

class Metric_Fetch_MITE:
    name = "Fetch_MITE"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_MITE(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_MITE zero division")
    desc = """
Average number of uops fetched from MITE per cycle"""

# Info.Frontend metrics.

class Metric_Fetch_UpC:
    name = "Fetch_UpC"
    domain = "Metric"
    maxval = 6.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_UpC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_UpC zero division")
    desc = """
Average number of Uops issued by front-end when it issued
something"""

class Metric_LSD_Coverage:
    name = "LSD_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed', 'LSD'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = LSD_Coverage(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "LSD_Coverage zero division")
    desc = """
Fraction of Uops delivered by the LSD (Loop Stream Detector;
aka Loop Cache)"""

# Flagged only when coverage is low AND the workload has high IPC.
class Metric_DSB_Coverage:
    name = "DSB_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSB', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Coverage(self, EV, 0)
            self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Coverage zero division")
    desc = """
Fraction of Uops delivered by the DSB (aka Decoded ICache; or
Uop Cache). See section 'Decoded ICache' in Optimization
Manual.
http://www.intel.com/content/www/us/en/architecture-
and-technology/64-ia-32-architectures-optimization-
manual.html"""

class Metric_Unknown_Branch_Cost:
    name = "Unknown_Branch_Cost"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Unknown_Branch_Cost(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Unknown_Branch_Cost zero division")
    desc = """
Average number of cycles the front-end was delayed due to an
Unknown Branch detection. See Unknown_Branches node."""

class Metric_DSB_Switch_Cost:
    name = "DSB_Switch_Cost"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSBmiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Switch_Cost(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Switch_Cost zero division")
    desc = """
Average number of cycles of a switch from the DSB fetch-unit
to MITE fetch unit - see DSB_Switches tree node for details."""

# Info.Botlnk.L2 metrics.

class Metric_DSB_Misses:
    name = "DSB_Misses"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Botlnk.L2"
    metricgroup = frozenset(['DSBmiss', 'Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Misses(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Misses zero division")
    desc = """
Total pipeline cost of DSB (uop cache) misses - subset of the
Instruction_Fetch_BW Bottleneck."""

class Metric_DSB_Bandwidth:
    name = "DSB_Bandwidth"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Botlnk.L2"
    metricgroup = frozenset(['DSB', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Bandwidth(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Bandwidth zero division")
    desc = """
Total pipeline cost of DSB (uop cache) hits - subset of the
Instruction_Fetch_BW
Bottleneck.""" class Metric_ICache_Miss_Latency: name = "ICache_Miss_Latency" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = ICache_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ICache_Miss_Latency zero division") desc = """ Average Latency for L1 instruction cache misses""" class Metric_IC_Misses: name = "IC_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = IC_Misses(self, EV, 0) self.thresh = (self.val > 5) except ZeroDivisionError: handle_error_metric(self, "IC_Misses zero division") desc = """ Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.""" class Metric_IpDSB_Miss_Ret: name = "IpDSB_Miss_Ret" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = IpDSB_Miss_Ret(self, EV, 0) self.thresh = (self.val < 50) except ZeroDivisionError: handle_error_metric(self, "IpDSB_Miss_Ret zero division") desc = """ Instructions per non-speculative DSB miss (lower number means higher occurrence rate)""" class Metric_IpUnknown_Branch: name = "IpUnknown_Branch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed']) sibling = None def compute(self, EV): try: self.val = IpUnknown_Branch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpUnknown_Branch zero division") desc = """ Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)""" class Metric_L2MPKI_Code: name = "L2MPKI_Code" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = 
None def compute(self, EV): try: self.val = L2MPKI_Code(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code zero division") desc = """ L2 cache true code cacheline misses per kilo instruction""" class Metric_L2MPKI_Code_All: name = "L2MPKI_Code_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code_All zero division") desc = """ L2 cache speculative code cacheline misses per kilo instruction""" class Metric_IpMispredict: name = "IpMispredict" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)""" class Metric_IpMisp_Cond_Ntaken: name = "IpMisp_Cond_Ntaken" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Cond_Ntaken(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Cond_Ntaken zero division") desc = """ Instructions per retired Mispredicts for conditional non- taken branches (lower number means higher occurrence rate).""" class Metric_IpMisp_Cond_Taken: name = "IpMisp_Cond_Taken" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Cond_Taken(self, EV, 0) self.thresh = (self.val < 200) except 
ZeroDivisionError: handle_error_metric(self, "IpMisp_Cond_Taken zero division") desc = """ Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).""" class Metric_IpMisp_Ret: name = "IpMisp_Ret" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Ret(self, EV, 0) self.thresh = (self.val < 500) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Ret zero division") desc = """ Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).""" class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = (self.val < 1000) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc = """ Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).""" class Metric_Branch_Misprediction_Cost: name = "Branch_Misprediction_Cost" domain = "Core_Metric" maxval = 300 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Branch_Misprediction_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Misprediction_Cost zero division") desc = """ Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)""" class Metric_Spec_Clears_Ratio: name = "Spec_Clears_Ratio" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Spec_Clears_Ratio(self, EV, 0) self.thresh = True except 
ZeroDivisionError: handle_error_metric(self, "Spec_Clears_Ratio zero division") desc = """ Speculative to Retired ratio of all clears (covering Mispredicts and nukes)""" class Metric_Cond_NT: name = "Cond_NT" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_NT(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cond_NT zero division") desc = """ Fraction of branches that are non-taken conditionals""" class Metric_Cond_TK: name = "Cond_TK" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_TK(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cond_TK zero division") desc = """ Fraction of branches that are taken conditionals""" class Metric_CallRet: name = "CallRet" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = CallRet(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CallRet zero division") desc = """ Fraction of branches that are CALL or RET""" class Metric_Jump: name = "Jump" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = Jump(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Jump zero division") desc = """ Fraction of branches that are unconditional (direct or indirect) jumps""" class Metric_Other_Branches: name = "Other_Branches" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = 
Other_Branches(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Other_Branches zero division") desc = """ Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)""" class Metric_Load_Miss_Real_Latency: name = "Load_Miss_Real_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat']) sibling = None def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Miss_Real_Latency zero division") desc = """ Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)""" class Metric_MLP: name = "MLP" domain = "Metric" maxval = 10.0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MLP zero division") desc = """ Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)""" class Metric_L1MPKI: name = "L1MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI zero division") desc = """ L1 cache true misses per kilo instruction for retired demand loads""" class Metric_L1MPKI_Load: name = "L1MPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI_Load zero division") desc = """ L1 cache true misses per kilo instruction for all demand loads (including speculative)""" class Metric_L2MPKI: name = "L2MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'Backend', 'CacheHits']) sibling = None def compute(self, EV): try: self.val = L2MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI zero division") desc = """ L2 cache true misses per kilo instruction for retired demand loads""" class Metric_L2MPKI_All: name = "L2MPKI_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_All zero division") desc = """ L2 cache true misses per kilo instruction for all request types (including speculative)""" class Metric_L2MPKI_Load: name = "L2MPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: 
handle_error_metric(self, "L2MPKI_Load zero division") desc = """ L2 cache true misses per kilo instruction for all demand loads (including speculative)""" class Metric_L2MPKI_RFO: name = "L2MPKI_RFO" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheMisses', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_RFO(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_RFO zero division") desc = """ Offcore requests (L2 cache miss) per kilo instruction for demand RFOs""" class Metric_L2HPKI_All: name = "L2HPKI_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2HPKI_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2HPKI_All zero division") desc = """ L2 cache hits per kilo instruction for all request types (including speculative)""" class Metric_L2HPKI_Load: name = "L2HPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2HPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2HPKI_Load zero division") desc = """ L2 cache hits per kilo instruction for all demand loads (including speculative)""" class Metric_L3MPKI: name = "L3MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = L3MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3MPKI zero division") desc = """ L3 cache true misses per kilo instruction for retired demand loads""" class Metric_FB_HPKI: name = "FB_HPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, 
EV): try: self.val = FB_HPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FB_HPKI zero division") desc = """ Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss- handling entries)""" class Metric_L1D_Cache_Fill_BW: name = "L1D_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW zero division") desc = """ """ class Metric_L2_Cache_Fill_BW: name = "L2_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Fill_BW: name = "L3_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Access_BW: name = "L3_Cache_Access_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW zero division") desc = """ """ class Metric_Page_Walks_Utilization: name = "Page_Walks_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def 
compute(self, EV): try: self.val = Page_Walks_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Page_Walks_Utilization zero division") desc = """ Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses""" class Metric_Code_STLB_MPKI: name = "Code_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Fed', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Code_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Code_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Load_STLB_MPKI: name = "Load_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Load_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Store_STLB_MPKI: name = "Store_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Store_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Store_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_L1D_Cache_Fill_BW_2T: name = "L1D_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, 
EV): try: self.val = L1D_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L1 data cache [GB / sec]""" class Metric_L2_Cache_Fill_BW_2T: name = "L2_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L2 cache [GB / sec]""" class Metric_L3_Cache_Fill_BW_2T: name = "L3_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L3 cache [GB / sec]""" class Metric_L3_Cache_Access_BW_2T: name = "L3_Cache_Access_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW_2T zero division") desc = """ Average per-core data access bandwidth to the L3 cache [GB / sec]""" class Metric_Load_L2_Miss_Latency: name = "Load_L2_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, 
"Load_L2_Miss_Latency zero division") desc = """ Average Latency for L2 cache miss demand Loads""" class Metric_Load_L3_Miss_Latency: name = "Load_L3_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L3_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L3_Miss_Latency zero division") desc = """ Average Latency for L3 cache miss demand Loads""" class Metric_Load_L2_MLP: name = "Load_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_MLP zero division") desc = """ Average Parallel L2 cache miss demand Loads""" class Metric_Data_L2_MLP: name = "Data_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Data_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Data_L2_MLP zero division") desc = """ Average Parallel L2 cache miss data reads""" class Metric_UC_Load_PKI: name = "UC_Load_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = UC_Load_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "UC_Load_PKI zero division") desc = """ Un-cacheable retired load per kilo instruction""" class Metric_Bus_Lock_PKI: name = "Bus_Lock_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = Bus_Lock_PKI(self, EV, 0) self.thresh = 
True except ZeroDivisionError: handle_error_metric(self, "Bus_Lock_PKI zero division") desc = """ \"Bus lock\" per kilo instruction""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "Metric" maxval = 1 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'Summary']) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization (percentage)""" class Metric_CPUs_Utilized: name = "CPUs_Utilized" domain = "Metric" maxval = 300 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = CPUs_Utilized(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPUs_Utilized zero division") desc = """ Average number of utilized CPUs""" class Metric_Core_Frequency: name = "Core_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary', 'Power']) sibling = None def compute(self, EV): try: self.val = Core_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Core_Frequency zero division") desc = """ Measured Average Core Frequency for unhalted processors [GHz]""" class Metric_GFLOPs: name = "GFLOPs" domain = "Metric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Cor', 'Flops', 'HPC']) sibling = None def compute(self, EV): try: self.val = GFLOPs(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "GFLOPs zero division") desc = """ Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_SMT_2T_Utilization: name = "SMT_2T_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = SMT_2T_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SMT_2T_Utilization zero division") desc = """ Fraction of cycles where both hardware Logical Processors were active""" class Metric_Kernel_Utilization: name = "Kernel_Utilization" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in the Operating System (OS) Kernel mode""" class Metric_Kernel_CPI: name = "Kernel_CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Kernel_CPI zero division") desc = """ Cycles Per Instruction for the Operating System (OS) Kernel mode""" class Metric_C0_Wait: name = "C0_Wait" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['C0Wait']) sibling = None def compute(self, EV): try: 
self.val = C0_Wait(self, EV, 0) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error_metric(self, "C0_Wait zero division") desc = """ Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states. Sample code of TPAUSE: h ttps://github.com/torvalds/linux/blob/master/arch/x86/lib/de lay.c""" class Metric_DRAM_BW_Use: name = "DRAM_BW_Use" domain = "GB/sec" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = DRAM_BW_Use(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DRAM_BW_Use zero division") desc = """ Average external Memory Bandwidth Use for reads and writes [GB / sec]""" class Metric_MEM_Read_Latency: name = "MEM_Read_Latency" domain = "NanoSeconds" maxval = 1000 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryLat', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Read_Latency zero division") desc = """ Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. memory-controller only""" class Metric_MEM_Parallel_Reads: name = "MEM_Parallel_Reads" domain = "SystemMetric" maxval = 100 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Parallel_Reads(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Parallel_Reads zero division") desc = """ Average number of parallel data read requests to external memory. 
Accounts for demand loads and L1/L2 prefetches""" class Metric_Power: name = "Power" domain = "SystemMetric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power', 'SoC']) sibling = None def compute(self, EV): try: self.val = Power(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Power zero division") desc = """ Total package Power in Watts""" class Metric_Time: name = "Time" domain = "Seconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = Time(self, EV, 0) self.thresh = (self.val < 1) except ZeroDivisionError: handle_error_metric(self, "Time zero division") desc = """ Run duration time in seconds""" class Metric_Socket_CLKS: name = "Socket_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Socket_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Socket_CLKS zero division") desc = """ Socket actual clocks when any core is active on that socket""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Branches', 'OS']) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) self.thresh = (self.val < 1000000) except ZeroDivisionError: handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]""" # Schedule class Setup: def __init__(self, r): o = dict() n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = 
Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n n = LCP() ; r.run(n) ; o["LCP"] = n n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n n = MITE() ; r.run(n) ; o["MITE"] = n n = Decoder0_Alone() ; r.run(n) ; o["Decoder0_Alone"] = n n = DSB() ; r.run(n) ; o["DSB"] = n n = LSD() ; r.run(n) ; o["LSD"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Other_Mispredicts() ; r.run(n) ; o["Other_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Other_Nukes() ; r.run(n) ; o["Other_Nukes"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n n = Load_STLB_Hit() ; r.run(n) ; o["Load_STLB_Hit"] = n n = Load_STLB_Miss() ; r.run(n) ; o["Load_STLB_Miss"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = L1_Hit_Latency() ; r.run(n) ; o["L1_Hit_Latency"] = n n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n n = FB_Full() ; r.run(n) ; o["FB_Full"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n n = Store_Bound() ; 
r.run(n) ; o["Store_Bound"] = n n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n n = Streaming_Stores() ; r.run(n) ; o["Streaming_Stores"] = n n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n n = Store_STLB_Hit() ; r.run(n) ; o["Store_STLB_Hit"] = n n = Store_STLB_Miss() ; r.run(n) ; o["Store_STLB_Miss"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Divider() ; r.run(n) ; o["Divider"] = n n = Serializing_Operation() ; r.run(n) ; o["Serializing_Operation"] = n n = Slow_Pause() ; r.run(n) ; o["Slow_Pause"] = n n = C01_WAIT() ; r.run(n) ; o["C01_WAIT"] = n n = C02_WAIT() ; r.run(n) ; o["C02_WAIT"] = n n = Memory_Fence() ; r.run(n) ; o["Memory_Fence"] = n n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n n = Mixing_Vectors() ; r.run(n) ; o["Mixing_Vectors"] = n n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n n = Port_0() ; r.run(n) ; o["Port_0"] = n n = Port_1() ; r.run(n) ; o["Port_1"] = n n = Port_6() ; r.run(n) ; o["Port_6"] = n n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n n = X87_Use() ; r.run(n) ; o["X87_Use"] = n n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n n = Int_Operations() ; r.run(n) ; o["Int_Operations"] = n n = Int_Vector_128b() ; r.run(n) ; 
o["Int_Vector_128b"] = n n = Int_Vector_256b() ; r.run(n) ; o["Int_Vector_256b"] = n n = Memory_Operations() ; r.run(n) ; o["Memory_Operations"] = n n = Fused_Instructions() ; r.run(n) ; o["Fused_Instructions"] = n n = Non_Fused_Branches() ; r.run(n) ; o["Non_Fused_Branches"] = n n = Other_Light_Ops() ; r.run(n) ; o["Other_Light_Ops"] = n n = Nop_Instructions() ; r.run(n) ; o["Nop_Instructions"] = n n = Shuffles_256b() ; r.run(n) ; o["Shuffles_256b"] = n n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n n = Few_Uops_Instructions() ; r.run(n) ; o["Few_Uops_Instructions"] = n n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n n = Assists() ; r.run(n) ; o["Assists"] = n n = Page_Faults() ; r.run(n) ; o["Page_Faults"] = n n = FP_Assists() ; r.run(n) ; o["FP_Assists"] = n n = AVX_Assists() ; r.run(n) ; o["AVX_Assists"] = n n = CISC() ; r.run(n) ; o["CISC"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Resteers"].parent = o["Fetch_Latency"] o["Mispredicts_Resteers"].parent = o["Branch_Resteers"] o["Clears_Resteers"].parent = o["Branch_Resteers"] o["Unknown_Branches"].parent = o["Branch_Resteers"] o["MS_Switches"].parent = o["Fetch_Latency"] o["LCP"].parent = o["Fetch_Latency"] o["DSB_Switches"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["MITE"].parent = o["Fetch_Bandwidth"] o["Decoder0_Alone"].parent = o["MITE"] o["DSB"].parent = o["Fetch_Bandwidth"] o["LSD"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Other_Mispredicts"].parent = o["Branch_Mispredicts"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Other_Nukes"].parent = o["Machine_Clears"] o["Memory_Bound"].parent = o["Backend_Bound"] o["L1_Bound"].parent = o["Memory_Bound"] o["DTLB_Load"].parent = o["L1_Bound"] o["Load_STLB_Hit"].parent = o["DTLB_Load"] o["Load_STLB_Miss"].parent = 
o["DTLB_Load"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["L1_Hit_Latency"].parent = o["L1_Bound"] o["Lock_Latency"].parent = o["L1_Bound"] o["Split_Loads"].parent = o["L1_Bound"] o["FB_Full"].parent = o["L1_Bound"] o["L2_Bound"].parent = o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["Contested_Accesses"].parent = o["L3_Bound"] o["Data_Sharing"].parent = o["L3_Bound"] o["L3_Hit_Latency"].parent = o["L3_Bound"] o["SQ_Full"].parent = o["L3_Bound"] o["DRAM_Bound"].parent = o["Memory_Bound"] o["MEM_Bandwidth"].parent = o["DRAM_Bound"] o["MEM_Latency"].parent = o["DRAM_Bound"] o["Store_Bound"].parent = o["Memory_Bound"] o["Store_Latency"].parent = o["Store_Bound"] o["False_Sharing"].parent = o["Store_Bound"] o["Split_Stores"].parent = o["Store_Bound"] o["Streaming_Stores"].parent = o["Store_Bound"] o["DTLB_Store"].parent = o["Store_Bound"] o["Store_STLB_Hit"].parent = o["DTLB_Store"] o["Store_STLB_Miss"].parent = o["DTLB_Store"] o["Core_Bound"].parent = o["Backend_Bound"] o["Divider"].parent = o["Core_Bound"] o["Serializing_Operation"].parent = o["Core_Bound"] o["Slow_Pause"].parent = o["Serializing_Operation"] o["C01_WAIT"].parent = o["Serializing_Operation"] o["C02_WAIT"].parent = o["Serializing_Operation"] o["Memory_Fence"].parent = o["Serializing_Operation"] o["Ports_Utilization"].parent = o["Core_Bound"] o["Ports_Utilized_0"].parent = o["Ports_Utilization"] o["Mixing_Vectors"].parent = o["Ports_Utilized_0"] o["Ports_Utilized_1"].parent = o["Ports_Utilization"] o["Ports_Utilized_2"].parent = o["Ports_Utilization"] o["Ports_Utilized_3m"].parent = o["Ports_Utilization"] o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_0"].parent = o["ALU_Op_Utilization"] o["Port_1"].parent = o["ALU_Op_Utilization"] o["Port_6"].parent = o["ALU_Op_Utilization"] o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Light_Operations"].parent = o["Retiring"] o["FP_Arith"].parent = 
o["Light_Operations"] o["X87_Use"].parent = o["FP_Arith"] o["FP_Scalar"].parent = o["FP_Arith"] o["FP_Vector"].parent = o["FP_Arith"] o["FP_Vector_128b"].parent = o["FP_Vector"] o["FP_Vector_256b"].parent = o["FP_Vector"] o["Int_Operations"].parent = o["Light_Operations"] o["Int_Vector_128b"].parent = o["Int_Operations"] o["Int_Vector_256b"].parent = o["Int_Operations"] o["Memory_Operations"].parent = o["Light_Operations"] o["Fused_Instructions"].parent = o["Light_Operations"] o["Non_Fused_Branches"].parent = o["Light_Operations"] o["Other_Light_Ops"].parent = o["Light_Operations"] o["Nop_Instructions"].parent = o["Other_Light_Ops"] o["Shuffles_256b"].parent = o["Other_Light_Ops"] o["Heavy_Operations"].parent = o["Retiring"] o["Few_Uops_Instructions"].parent = o["Heavy_Operations"] o["Microcode_Sequencer"].parent = o["Heavy_Operations"] o["Assists"].parent = o["Microcode_Sequencer"] o["Page_Faults"].parent = o["Assists"] o["FP_Assists"].parent = o["Assists"] o["AVX_Assists"].parent = o["Assists"] o["CISC"].parent = o["Microcode_Sequencer"] # user visible metrics n = Metric_Mispredictions() ; r.metric(n) ; o["Mispredictions"] = n n = Metric_Big_Code() ; r.metric(n) ; o["Big_Code"] = n n = Metric_Instruction_Fetch_BW() ; r.metric(n) ; o["Instruction_Fetch_BW"] = n n = Metric_Cache_Memory_Bandwidth() ; r.metric(n) ; o["Cache_Memory_Bandwidth"] = n n = Metric_Cache_Memory_Latency() ; r.metric(n) ; o["Cache_Memory_Latency"] = n n = Metric_Memory_Data_TLBs() ; r.metric(n) ; o["Memory_Data_TLBs"] = n n = Metric_Memory_Synchronization() ; r.metric(n) ; o["Memory_Synchronization"] = n n = Metric_Compute_Bound_Est() ; r.metric(n) ; o["Compute_Bound_Est"] = n n = Metric_Irregular_Overhead() ; r.metric(n) ; o["Irregular_Overhead"] = n n = Metric_Other_Bottlenecks() ; r.metric(n) ; o["Other_Bottlenecks"] = n n = Metric_Branching_Overhead() ; r.metric(n) ; o["Branching_Overhead"] = n n = Metric_Useful_Work() ; r.metric(n) ; o["Useful_Work"] = n n = Metric_Core_Bound_Likely() ; 
r.metric(n) ; o["Core_Bound_Likely"] = n n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_Slots_Utilization() ; r.metric(n) ; o["Slots_Utilization"] = n n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n n = Metric_EPC() ; r.metric(n) ; o["EPC"] = n n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n n = Metric_IpPause() ; r.metric(n) ; o["IpPause"] = n n = Metric_IpSWPF() ; r.metric(n) ; o["IpSWPF"] = n n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n n = Metric_Strings_Cycles() ; r.metric(n) ; o["Strings_Cycles"] = n n = Metric_IpAssist() ; r.metric(n) ; o["IpAssist"] = n n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n n = Metric_Fetch_LSD() ; r.metric(n) ; o["Fetch_LSD"] = n n = 
Metric_Fetch_DSB() ; r.metric(n) ; o["Fetch_DSB"] = n n = Metric_Fetch_MITE() ; r.metric(n) ; o["Fetch_MITE"] = n n = Metric_Fetch_UpC() ; r.metric(n) ; o["Fetch_UpC"] = n n = Metric_LSD_Coverage() ; r.metric(n) ; o["LSD_Coverage"] = n n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n n = Metric_Unknown_Branch_Cost() ; r.metric(n) ; o["Unknown_Branch_Cost"] = n n = Metric_DSB_Switch_Cost() ; r.metric(n) ; o["DSB_Switch_Cost"] = n n = Metric_DSB_Misses() ; r.metric(n) ; o["DSB_Misses"] = n n = Metric_DSB_Bandwidth() ; r.metric(n) ; o["DSB_Bandwidth"] = n n = Metric_ICache_Miss_Latency() ; r.metric(n) ; o["ICache_Miss_Latency"] = n n = Metric_IC_Misses() ; r.metric(n) ; o["IC_Misses"] = n n = Metric_IpDSB_Miss_Ret() ; r.metric(n) ; o["IpDSB_Miss_Ret"] = n n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n n = Metric_L2MPKI_Code() ; r.metric(n) ; o["L2MPKI_Code"] = n n = Metric_L2MPKI_Code_All() ; r.metric(n) ; o["L2MPKI_Code_All"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Cond_Ntaken() ; r.metric(n) ; o["IpMisp_Cond_Ntaken"] = n n = Metric_IpMisp_Cond_Taken() ; r.metric(n) ; o["IpMisp_Cond_Taken"] = n n = Metric_IpMisp_Ret() ; r.metric(n) ; o["IpMisp_Ret"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_Branch_Misprediction_Cost() ; r.metric(n) ; o["Branch_Misprediction_Cost"] = n n = Metric_Spec_Clears_Ratio() ; r.metric(n) ; o["Spec_Clears_Ratio"] = n n = Metric_Cond_NT() ; r.metric(n) ; o["Cond_NT"] = n n = Metric_Cond_TK() ; r.metric(n) ; o["Cond_TK"] = n n = Metric_CallRet() ; r.metric(n) ; o["CallRet"] = n n = Metric_Jump() ; r.metric(n) ; o["Jump"] = n n = Metric_Other_Branches() ; r.metric(n) ; o["Other_Branches"] = n n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n n = Metric_L1MPKI_Load() ; r.metric(n) ; 
o["L1MPKI_Load"] = n n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n n = Metric_L2HPKI_All() ; r.metric(n) ; o["L2HPKI_All"] = n n = Metric_L2HPKI_Load() ; r.metric(n) ; o["L2HPKI_Load"] = n n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n n = Metric_FB_HPKI() ; r.metric(n) ; o["FB_HPKI"] = n n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n n = Metric_L3_Cache_Access_BW() ; r.metric(n) ; o["L3_Cache_Access_BW"] = n n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n n = Metric_Code_STLB_MPKI() ; r.metric(n) ; o["Code_STLB_MPKI"] = n n = Metric_Load_STLB_MPKI() ; r.metric(n) ; o["Load_STLB_MPKI"] = n n = Metric_Store_STLB_MPKI() ; r.metric(n) ; o["Store_STLB_MPKI"] = n n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Access_BW_2T() ; r.metric(n) ; o["L3_Cache_Access_BW_2T"] = n n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n n = Metric_Load_L3_Miss_Latency() ; r.metric(n) ; o["Load_L3_Miss_Latency"] = n n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n n = Metric_UC_Load_PKI() ; r.metric(n) ; o["UC_Load_PKI"] = n n = Metric_Bus_Lock_PKI() ; r.metric(n) ; o["Bus_Lock_PKI"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n 
n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n n = Metric_C0_Wait() ; r.metric(n) ; o["C0_Wait"] = n n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n n = Metric_Power() ; r.metric(n) ; o["Power"] = n n = Metric_Time() ; r.metric(n) ; o["Time"] = n n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n # references between groups o["Branch_Resteers"].Unknown_Branches = o["Unknown_Branches"] o["Mispredicts_Resteers"].Retiring = o["Retiring"] o["Mispredicts_Resteers"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Mispredicts_Resteers"].Bad_Speculation = o["Bad_Speculation"] o["Mispredicts_Resteers"].Frontend_Bound = o["Frontend_Bound"] o["Mispredicts_Resteers"].Backend_Bound = o["Backend_Bound"] o["Clears_Resteers"].Retiring = o["Retiring"] o["Clears_Resteers"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Clears_Resteers"].Bad_Speculation = o["Bad_Speculation"] o["Clears_Resteers"].Frontend_Bound = o["Frontend_Bound"] o["Clears_Resteers"].Backend_Bound = o["Backend_Bound"] o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["Bad_Speculation"].Retiring = o["Retiring"] o["Bad_Speculation"].Frontend_Bound = o["Frontend_Bound"] o["Bad_Speculation"].Backend_Bound = o["Backend_Bound"] o["Other_Mispredicts"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Machine_Clears"].Retiring = o["Retiring"] o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Machine_Clears"].Bad_Speculation 
= o["Bad_Speculation"] o["Machine_Clears"].Frontend_Bound = o["Frontend_Bound"] o["Machine_Clears"].Backend_Bound = o["Backend_Bound"] o["Other_Nukes"].Machine_Clears = o["Machine_Clears"] o["Other_Nukes"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Nukes"].Retiring = o["Retiring"] o["Other_Nukes"].Backend_Bound = o["Backend_Bound"] o["Other_Nukes"].Bad_Speculation = o["Bad_Speculation"] o["Other_Nukes"].Frontend_Bound = o["Frontend_Bound"] o["Load_STLB_Hit"].Load_STLB_Miss = o["Load_STLB_Miss"] o["Load_STLB_Hit"].DTLB_Load = o["DTLB_Load"] o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Store_STLB_Hit"].DTLB_Store = o["DTLB_Store"] o["Store_STLB_Hit"].Store_STLB_Miss = o["Store_STLB_Miss"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Serializing_Operation"].C02_WAIT = o["C02_WAIT"] o["Ports_Utilization"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Ports_Utilization"].Retiring = o["Retiring"] o["Retiring"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Retiring = o["Retiring"] o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"] o["FP_Arith"].Retiring = o["Retiring"] o["FP_Arith"].FP_Scalar = o["FP_Scalar"] o["FP_Arith"].X87_Use = o["X87_Use"] o["FP_Arith"].FP_Vector = o["FP_Vector"] o["X87_Use"].Retiring = o["Retiring"] o["FP_Scalar"].Retiring = o["Retiring"] o["FP_Vector"].Retiring = o["Retiring"] o["FP_Vector_128b"].Retiring = o["Retiring"] o["FP_Vector_256b"].Retiring = o["Retiring"] o["Int_Operations"].Retiring = o["Retiring"] o["Int_Operations"].Int_Vector_256b = o["Int_Vector_256b"] o["Int_Operations"].Int_Vector_128b = o["Int_Vector_128b"] o["Int_Vector_128b"].Retiring = o["Retiring"] o["Int_Vector_256b"].Retiring = o["Retiring"] o["Memory_Operations"].Retiring = o["Retiring"] o["Memory_Operations"].Light_Operations = o["Light_Operations"] o["Memory_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Fused_Instructions"].Retiring = o["Retiring"] 
o["Fused_Instructions"].Light_Operations = o["Light_Operations"] o["Fused_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Non_Fused_Branches"].Retiring = o["Retiring"] o["Non_Fused_Branches"].Light_Operations = o["Light_Operations"] o["Non_Fused_Branches"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].Light_Operations = o["Light_Operations"] o["Other_Light_Ops"].Retiring = o["Retiring"] o["Other_Light_Ops"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].Int_Operations = o["Int_Operations"] o["Other_Light_Ops"].Non_Fused_Branches = o["Non_Fused_Branches"] o["Other_Light_Ops"].FP_Arith = o["FP_Arith"] o["Other_Light_Ops"].Fused_Instructions = o["Fused_Instructions"] o["Other_Light_Ops"].Int_Vector_128b = o["Int_Vector_128b"] o["Other_Light_Ops"].FP_Vector = o["FP_Vector"] o["Other_Light_Ops"].FP_Scalar = o["FP_Scalar"] o["Other_Light_Ops"].X87_Use = o["X87_Use"] o["Other_Light_Ops"].Int_Vector_256b = o["Int_Vector_256b"] o["Other_Light_Ops"].Memory_Operations = o["Memory_Operations"] o["Nop_Instructions"].Retiring = o["Retiring"] o["Nop_Instructions"].Light_Operations = o["Light_Operations"] o["Nop_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Shuffles_256b"].Retiring = o["Retiring"] o["Shuffles_256b"].Light_Operations = o["Light_Operations"] o["Shuffles_256b"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Assists = o["Assists"] o["Mispredictions"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Mispredictions"].LCP = o["LCP"] o["Mispredictions"].Retiring = o["Retiring"] o["Mispredictions"].Other_Mispredicts = o["Other_Mispredicts"] o["Mispredictions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Mispredictions"].Frontend_Bound = o["Frontend_Bound"] o["Mispredictions"].DSB_Switches = o["DSB_Switches"] 
o["Mispredictions"].Backend_Bound = o["Backend_Bound"] o["Mispredictions"].Branch_Resteers = o["Branch_Resteers"] o["Mispredictions"].ICache_Misses = o["ICache_Misses"] o["Mispredictions"].MS_Switches = o["MS_Switches"] o["Mispredictions"].Bad_Speculation = o["Bad_Speculation"] o["Mispredictions"].ITLB_Misses = o["ITLB_Misses"] o["Mispredictions"].Unknown_Branches = o["Unknown_Branches"] o["Mispredictions"].Fetch_Latency = o["Fetch_Latency"] o["Mispredictions"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Big_Code"].LCP = o["LCP"] o["Big_Code"].ICache_Misses = o["ICache_Misses"] o["Big_Code"].DSB_Switches = o["DSB_Switches"] o["Big_Code"].Branch_Resteers = o["Branch_Resteers"] o["Big_Code"].MS_Switches = o["MS_Switches"] o["Big_Code"].ITLB_Misses = o["ITLB_Misses"] o["Big_Code"].Unknown_Branches = o["Unknown_Branches"] o["Big_Code"].Fetch_Latency = o["Fetch_Latency"] o["Instruction_Fetch_BW"].Retiring = o["Retiring"] o["Instruction_Fetch_BW"].Other_Mispredicts = o["Other_Mispredicts"] o["Instruction_Fetch_BW"].DSB_Switches = o["DSB_Switches"] o["Instruction_Fetch_BW"].Backend_Bound = o["Backend_Bound"] o["Instruction_Fetch_BW"].Branch_Resteers = o["Branch_Resteers"] o["Instruction_Fetch_BW"].Fetch_Latency = o["Fetch_Latency"] o["Instruction_Fetch_BW"].ICache_Misses = o["ICache_Misses"] o["Instruction_Fetch_BW"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Instruction_Fetch_BW"].Frontend_Bound = o["Frontend_Bound"] o["Instruction_Fetch_BW"].Bad_Speculation = o["Bad_Speculation"] o["Instruction_Fetch_BW"].ITLB_Misses = o["ITLB_Misses"] o["Instruction_Fetch_BW"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Instruction_Fetch_BW"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Instruction_Fetch_BW"].LCP = o["LCP"] o["Instruction_Fetch_BW"].Clears_Resteers = o["Clears_Resteers"] o["Instruction_Fetch_BW"].MS_Switches = o["MS_Switches"] o["Instruction_Fetch_BW"].Unknown_Branches = o["Unknown_Branches"] o["Cache_Memory_Bandwidth"].L1_Bound = 
o["L1_Bound"] o["Cache_Memory_Bandwidth"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Cache_Memory_Bandwidth"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Bandwidth"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Bandwidth"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Bandwidth"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Bandwidth"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Bandwidth"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Bandwidth"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Bandwidth"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Bandwidth"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Bandwidth"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Bandwidth"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Bandwidth"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Bandwidth"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Bandwidth"].FB_Full = o["FB_Full"] o["Cache_Memory_Bandwidth"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Bandwidth"].DRAM_Bound = o["DRAM_Bound"] o["Cache_Memory_Latency"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Latency"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Latency"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Latency"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Latency"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Latency"].Store_Latency = o["Store_Latency"] o["Cache_Memory_Latency"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Latency"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Latency"].False_Sharing = o["False_Sharing"] o["Cache_Memory_Latency"].Streaming_Stores = o["Streaming_Stores"] o["Cache_Memory_Latency"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Latency"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Latency"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Latency"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Latency"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Latency"].FB_Full = 
o["FB_Full"] o["Cache_Memory_Latency"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Cache_Memory_Latency"].DTLB_Store = o["DTLB_Store"] o["Cache_Memory_Latency"].Split_Stores = o["Split_Stores"] o["Cache_Memory_Latency"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Latency"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Latency"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Data_TLBs"].L1_Bound = o["L1_Bound"] o["Memory_Data_TLBs"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Memory_Data_TLBs"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Memory_Data_TLBs"].DTLB_Load = o["DTLB_Load"] o["Memory_Data_TLBs"].Store_Latency = o["Store_Latency"] o["Memory_Data_TLBs"].Split_Stores = o["Split_Stores"] o["Memory_Data_TLBs"].False_Sharing = o["False_Sharing"] o["Memory_Data_TLBs"].DTLB_Store = o["DTLB_Store"] o["Memory_Data_TLBs"].L2_Bound = o["L2_Bound"] o["Memory_Data_TLBs"].Memory_Bound = o["Memory_Bound"] o["Memory_Data_TLBs"].Lock_Latency = o["Lock_Latency"] o["Memory_Data_TLBs"].Store_Bound = o["Store_Bound"] o["Memory_Data_TLBs"].Split_Loads = o["Split_Loads"] o["Memory_Data_TLBs"].L3_Bound = o["L3_Bound"] o["Memory_Data_TLBs"].FB_Full = o["FB_Full"] o["Memory_Data_TLBs"].Streaming_Stores = o["Streaming_Stores"] o["Memory_Data_TLBs"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Synchronization"].L1_Bound = o["L1_Bound"] o["Memory_Synchronization"].Retiring = o["Retiring"] o["Memory_Synchronization"].Data_Sharing = o["Data_Sharing"] o["Memory_Synchronization"].L2_Bound = o["L2_Bound"] o["Memory_Synchronization"].Contested_Accesses = o["Contested_Accesses"] o["Memory_Synchronization"].L3_Bound = o["L3_Bound"] o["Memory_Synchronization"].Machine_Clears = o["Machine_Clears"] o["Memory_Synchronization"].Store_Latency = o["Store_Latency"] o["Memory_Synchronization"].Backend_Bound = o["Backend_Bound"] o["Memory_Synchronization"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Memory_Synchronization"].False_Sharing = o["False_Sharing"] o["Memory_Synchronization"].Frontend_Bound = o["Frontend_Bound"] 
o["Memory_Synchronization"].Streaming_Stores = o["Streaming_Stores"] o["Memory_Synchronization"].Memory_Bound = o["Memory_Bound"] o["Memory_Synchronization"].SQ_Full = o["SQ_Full"] o["Memory_Synchronization"].Store_Bound = o["Store_Bound"] o["Memory_Synchronization"].Bad_Speculation = o["Bad_Speculation"] o["Memory_Synchronization"].DTLB_Store = o["DTLB_Store"] o["Memory_Synchronization"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Memory_Synchronization"].Split_Stores = o["Split_Stores"] o["Memory_Synchronization"].Other_Nukes = o["Other_Nukes"] o["Memory_Synchronization"].DRAM_Bound = o["DRAM_Bound"] o["Compute_Bound_Est"].Serializing_Operation = o["Serializing_Operation"] o["Compute_Bound_Est"].Ports_Utilization = o["Ports_Utilization"] o["Compute_Bound_Est"].C02_WAIT = o["C02_WAIT"] o["Compute_Bound_Est"].Retiring = o["Retiring"] o["Compute_Bound_Est"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Compute_Bound_Est"].Memory_Bound = o["Memory_Bound"] o["Compute_Bound_Est"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Compute_Bound_Est"].Core_Bound = o["Core_Bound"] o["Compute_Bound_Est"].Backend_Bound = o["Backend_Bound"] o["Compute_Bound_Est"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Compute_Bound_Est"].Divider = o["Divider"] o["Compute_Bound_Est"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Heavy_Operations = o["Heavy_Operations"] o["Irregular_Overhead"].Ports_Utilization = o["Ports_Utilization"] o["Irregular_Overhead"].C02_WAIT = o["C02_WAIT"] o["Irregular_Overhead"].Retiring = o["Retiring"] o["Irregular_Overhead"].ICache_Misses = o["ICache_Misses"] o["Irregular_Overhead"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Irregular_Overhead"].Frontend_Bound = o["Frontend_Bound"] o["Irregular_Overhead"].Serializing_Operation = o["Serializing_Operation"] o["Irregular_Overhead"].Core_Bound = o["Core_Bound"] o["Irregular_Overhead"].Bad_Speculation = o["Bad_Speculation"] o["Irregular_Overhead"].ITLB_Misses = o["ITLB_Misses"] 
o["Irregular_Overhead"].Divider = o["Divider"] o["Irregular_Overhead"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Irregular_Overhead"].Memory_Bound = o["Memory_Bound"] o["Irregular_Overhead"].Machine_Clears = o["Machine_Clears"] o["Irregular_Overhead"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Irregular_Overhead"].LCP = o["LCP"] o["Irregular_Overhead"].Other_Mispredicts = o["Other_Mispredicts"] o["Irregular_Overhead"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Irregular_Overhead"].DSB_Switches = o["DSB_Switches"] o["Irregular_Overhead"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Assists = o["Assists"] o["Irregular_Overhead"].Backend_Bound = o["Backend_Bound"] o["Irregular_Overhead"].Branch_Resteers = o["Branch_Resteers"] o["Irregular_Overhead"].Clears_Resteers = o["Clears_Resteers"] o["Irregular_Overhead"].MS_Switches = o["MS_Switches"] o["Irregular_Overhead"].Other_Nukes = o["Other_Nukes"] o["Irregular_Overhead"].Unknown_Branches = o["Unknown_Branches"] o["Irregular_Overhead"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].L1_Bound = o["L1_Bound"] o["Other_Bottlenecks"].C02_WAIT = o["C02_WAIT"] o["Other_Bottlenecks"].Retiring = o["Retiring"] o["Other_Bottlenecks"].Data_Sharing = o["Data_Sharing"] o["Other_Bottlenecks"].L2_Bound = o["L2_Bound"] o["Other_Bottlenecks"].Core_Bound = o["Core_Bound"] o["Other_Bottlenecks"].Ports_Utilization = o["Ports_Utilization"] o["Other_Bottlenecks"].Contested_Accesses = o["Contested_Accesses"] o["Other_Bottlenecks"].Divider = o["Divider"] o["Other_Bottlenecks"].L3_Bound = o["L3_Bound"] o["Other_Bottlenecks"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Other_Bottlenecks"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Other_Bottlenecks"].FB_Full = o["FB_Full"] o["Other_Bottlenecks"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Other_Bottlenecks"].Store_Latency = o["Store_Latency"] o["Other_Bottlenecks"].Other_Mispredicts = o["Other_Mispredicts"] o["Other_Bottlenecks"].DSB_Switches = 
o["DSB_Switches"] o["Other_Bottlenecks"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Other_Bottlenecks"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Other_Bottlenecks"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Other_Bottlenecks"].Assists = o["Assists"] o["Other_Bottlenecks"].Backend_Bound = o["Backend_Bound"] o["Other_Bottlenecks"].Branch_Resteers = o["Branch_Resteers"] o["Other_Bottlenecks"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Other_Bottlenecks"].Heavy_Operations = o["Heavy_Operations"] o["Other_Bottlenecks"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].DTLB_Load = o["DTLB_Load"] o["Other_Bottlenecks"].False_Sharing = o["False_Sharing"] o["Other_Bottlenecks"].ICache_Misses = o["ICache_Misses"] o["Other_Bottlenecks"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Other_Bottlenecks"].Frontend_Bound = o["Frontend_Bound"] o["Other_Bottlenecks"].Machine_Clears = o["Machine_Clears"] o["Other_Bottlenecks"].Streaming_Stores = o["Streaming_Stores"] o["Other_Bottlenecks"].Memory_Bound = o["Memory_Bound"] o["Other_Bottlenecks"].SQ_Full = o["SQ_Full"] o["Other_Bottlenecks"].Store_Bound = o["Store_Bound"] o["Other_Bottlenecks"].Split_Loads = o["Split_Loads"] o["Other_Bottlenecks"].Bad_Speculation = o["Bad_Speculation"] o["Other_Bottlenecks"].ITLB_Misses = o["ITLB_Misses"] o["Other_Bottlenecks"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Other_Bottlenecks"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Other_Bottlenecks"].Serializing_Operation = o["Serializing_Operation"] o["Other_Bottlenecks"].DTLB_Store = o["DTLB_Store"] o["Other_Bottlenecks"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Bottlenecks"].LCP = o["LCP"] o["Other_Bottlenecks"].Split_Stores = o["Split_Stores"] o["Other_Bottlenecks"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Other_Bottlenecks"].Lock_Latency = o["Lock_Latency"] o["Other_Bottlenecks"].MEM_Latency = o["MEM_Latency"] o["Other_Bottlenecks"].Clears_Resteers = o["Clears_Resteers"] 
o["Other_Bottlenecks"].MS_Switches = o["MS_Switches"] o["Other_Bottlenecks"].Other_Nukes = o["Other_Nukes"] o["Other_Bottlenecks"].Unknown_Branches = o["Unknown_Branches"] o["Other_Bottlenecks"].DRAM_Bound = o["DRAM_Bound"] o["Useful_Work"].Retiring = o["Retiring"] o["Useful_Work"].Heavy_Operations = o["Heavy_Operations"] o["Useful_Work"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Useful_Work"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Useful_Work"].Assists = o["Assists"] o["Core_Bound_Likely"].Memory_Bound = o["Memory_Bound"] o["Core_Bound_Likely"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Core_Bound_Likely"].Core_Bound = o["Core_Bound"] o["Core_Bound_Likely"].Ports_Utilization = o["Ports_Utilization"] o["Core_Bound_Likely"].Retiring = o["Retiring"] o["Core_Bound_Likely"].Backend_Bound = o["Backend_Bound"] o["UopPI"].Retiring = o["Retiring"] o["UpTB"].Retiring = o["Retiring"] o["Retire"].Retiring = o["Retiring"] o["DSB_Misses"].LSD = o["LSD"] o["DSB_Misses"].MITE = o["MITE"] o["DSB_Misses"].LCP = o["LCP"] o["DSB_Misses"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Misses"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Misses"].DSB_Switches = o["DSB_Switches"] o["DSB_Misses"].Branch_Resteers = o["Branch_Resteers"] o["DSB_Misses"].ICache_Misses = o["ICache_Misses"] o["DSB_Misses"].MS_Switches = o["MS_Switches"] o["DSB_Misses"].ITLB_Misses = o["ITLB_Misses"] o["DSB_Misses"].DSB = o["DSB"] o["DSB_Misses"].Unknown_Branches = o["Unknown_Branches"] o["DSB_Misses"].Fetch_Latency = o["Fetch_Latency"] o["DSB_Bandwidth"].LSD = o["LSD"] o["DSB_Bandwidth"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Bandwidth"].MITE = o["MITE"] o["DSB_Bandwidth"].DSB = o["DSB"] o["DSB_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].LCP = o["LCP"] o["IC_Misses"].MS_Switches = o["MS_Switches"] o["IC_Misses"].ICache_Misses = o["ICache_Misses"] 
o["IC_Misses"].ITLB_Misses = o["ITLB_Misses"] o["IC_Misses"].Unknown_Branches = o["Unknown_Branches"] o["IC_Misses"].DSB_Switches = o["DSB_Switches"] o["IC_Misses"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].Retiring = o["Retiring"] o["Branch_Misprediction_Cost"].ICache_Misses = o["ICache_Misses"] o["Branch_Misprediction_Cost"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Branch_Misprediction_Cost"].Frontend_Bound = o["Frontend_Bound"] o["Branch_Misprediction_Cost"].Bad_Speculation = o["Bad_Speculation"] o["Branch_Misprediction_Cost"].ITLB_Misses = o["ITLB_Misses"] o["Branch_Misprediction_Cost"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Branch_Misprediction_Cost"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Branch_Misprediction_Cost"].LCP = o["LCP"] o["Branch_Misprediction_Cost"].Other_Mispredicts = o["Other_Mispredicts"] o["Branch_Misprediction_Cost"].DSB_Switches = o["DSB_Switches"] o["Branch_Misprediction_Cost"].Backend_Bound = o["Backend_Bound"] o["Branch_Misprediction_Cost"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].MS_Switches = o["MS_Switches"] o["Branch_Misprediction_Cost"].Unknown_Branches = o["Unknown_Branches"] o["Branch_Misprediction_Cost"].Fetch_Latency = o["Fetch_Latency"] # siblings cross-tree o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],) o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],) o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Serializing_Operation"], o["Mixing_Vectors"], o["Microcode_Sequencer"],) o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],) o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],) o["Decoder0_Alone"].sibling = (o["Few_Uops_Instructions"],) o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],) o["Machine_Clears"].sibling = 
(o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"], o["Microcode_Sequencer"],) o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],) o["DTLB_Load"].sibling = (o["DTLB_Store"],) o["Lock_Latency"].sibling = (o["Store_Latency"],) o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"], o["Streaming_Stores"],) o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["False_Sharing"],) o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["False_Sharing"],) o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],) o["L3_Hit_Latency"].overlap = True o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],) o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],) o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],) o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],) o["Store_Latency"].overlap = True o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"],) o["Streaming_Stores"].sibling = (o["FB_Full"],) o["DTLB_Store"].sibling = (o["DTLB_Load"],) o["Serializing_Operation"].sibling = (o["MS_Switches"],) o["Mixing_Vectors"].sibling = (o["MS_Switches"],) o["Ports_Utilized_1"].sibling = (o["L1_Bound"],) o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], 
o["Port_1"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["Int_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_256b"],) o["Int_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"],) o["Few_Uops_Instructions"].sibling = (o["Decoder0_Alone"],) o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],) o["Mispredictions"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["Cache_Memory_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],) o["Cache_Memory_Latency"].sibling = (o["L3_Hit_Latency"], o["MEM_Latency"],) o["Memory_Data_TLBs"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Memory_Synchronization"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Irregular_Overhead"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],) o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) 
o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Misses"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["Branch_Misprediction_Cost"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
244,918
Python
.py
5,674
37.201269
1,765
0.655639
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,908
metrics.py
andikleen_pmu-tools/metrics.py
# Base classes for different metrics

# Error-reporting hook; the hosting tool replaces this with a real
# implementation. The default swallows messages.
print_error = lambda msg: False


class MetricBase(object):
    """Base class for all metric nodes.

    Carries the static description of a metric (hierarchy level, name,
    measurement domain, pipeline area, metric groups) plus the runtime
    state filled in by compute(): ``val`` (the computed value),
    ``thresh`` (whether the value is significant) and ``errcount``.
    """

    # Derived classes can override these
    level = 0                    # depth in the TopDown tree (1 = top level)
    name = ""                    # canonical metric identifier
    domain = ""                  # unit of val, e.g. "Slots" or "Clocks"
    area = ""                    # pipeline area: FE / BAD / RET / BE
    htoff = False                # True if only valid with HyperThreading off
    sample = []                  # suggested sampling events (class-shared; subclasses override)
    sibling = None               # cross-tree sibling nodes, if any
    metricgroup = frozenset([])  # metric-group tags used for selection
    desc = "Missing description"

    def __init__(self, **kwargs):
        # Any class attribute may be overridden per instance via kwargs.
        self.errcount = 0
        for k, v in kwargs.items():
            setattr(self, k, v)

    def compute(self, EV):
        """Evaluate this metric with event resolver EV.

        Stores the result in self.val and sets self.thresh to True when
        the value is positive.  A ZeroDivisionError (zero or missing
        event counts) is reported via print_error and mapped to
        val == 0 / thresh == False instead of propagating, so one bad
        metric cannot abort a whole run.
        """
        try:
            self.val = self._compute(EV)
            self.thresh = self.val > 0
        except ZeroDivisionError:
            print_error("{0} zero division".format(self.__class__.__name__))
            self.errcount += 1
            self.val = 0
            self.thresh = False
        return self.val

    def _compute(self, EV):
        # Subclasses supply the actual formula.
        raise NotImplementedError()


class FrontendBound(MetricBase):
    level = 1
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    desc = ("\n"
            "This category reflects slots where the Frontend of the\n"
            "processor undersupplies its Backend.")
    metricgroup = frozenset(['TopDownL1'])


class FrontendLatency(MetricBase):
    level = 2
    name = "Frontend_Latency"
    domain = "Slots"
    area = "FE"
    desc = ("\n"
            "This metric represents slots fraction the CPU was stalled\n"
            "due to Frontend latency issues. For example; instruction-\n"
            "cache misses; iTLB misses or fetch stalls after a branch\n"
            "misprediction are categorized under Frontend Latency. In\n"
            "such cases; the Frontend eventually delivers no uops for\n"
            "some period.")
    # The following repeat base-class defaults; kept for compatibility
    # with tools that introspect class dictionaries directly.
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    server = False
    metricgroup = frozenset(['Frontend_Bound', 'TopDownL2'])


class BadSpeculation(MetricBase):
    level = 1
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    desc = ("\n"
            "This category represents slots fraction wasted due to\n"
            "incorrect speculations. This includes slots used to issue\n"
            "uops that do not eventually get retired and slots for which\n"
            "the issue-pipeline was blocked due to recovery from earlier\n"
            "incorrect speculation. For example; wasted work due to miss-\n"
            "predicted branches are categorized under Bad Speculation\n"
            "category. Incorrect data speculation followed by Memory\n"
            "Ordering Nukes is another example.")
    metricgroup = frozenset(['Bad_Speculation', 'TopDownL1'])


class Retiring(MetricBase):
    level = 1
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    # FIX: the original concatenated "Ideally;" with no "\n", fusing the
    # text into "Ideally;all pipeline ...".
    desc = ("\n"
            "This category represents slots fraction utilized by useful\n"
            "work i.e. issued uops that eventually get retired. Ideally;\n"
            "all pipeline slots would be attributed to the Retiring\n"
            "category. Retiring of 100% would indicate the maximum 4\n"
            "uops retired per cycle has been achieved. Maximizing\n"
            "Retiring typically increases the Instruction-Per-Cycle\n"
            "metric. Note that a high Retiring value does not necessary\n"
            "mean there is no room for more performance. For example;\n"
            "Microcode assists are categorized under Retiring. They hurt\n"
            "performance and can often be avoided. . A high Retiring\n"
            "value for non-vectorized code may be a good hint for\n"
            "programmer to consider vectorizing his code. Doing so\n"
            "essentially lets more computations be done without\n"
            "significantly increasing number of instructions thus\n"
            "improving the performance.")
    metricgroup = frozenset(['TopDownL1'])


class BackendBound(MetricBase):
    level = 1
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    desc = ("\n"
            "This category represents slots fraction where no uops are\n"
            "being delivered due to a lack of required resources for\n"
            "accepting new uops in the Backend. Backend is the portion of\n"
            "the processor core where the out-of-order scheduler\n"
            "dispatches ready uops into their respective execution units;\n"
            "and once completed these uops get retired according to\n"
            "program order. For example; stalls due to data-cache misses\n"
            "or stalls due to the divider unit being overloaded are both\n"
            "categorized under Backend Bound. Backend Bound is further\n"
            "divided into two main categories: Memory Bound and Core\n"
            "Bound.")
    metricgroup = frozenset(['TopDownL1'])


class ICacheMisses(MetricBase):
    level = 3
    # FIX: normalized from "ICache Misses" to the underscore form used by
    # every other metric; these names double as identifiers in grouping.
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    desc = ("\n"
            "This metric represents cycles fraction the CPU was stalled\n"
            "due to instruction cache misses.. Using compiler's Profile-\n"
            "Guided Optimization (PGO) can reduce i-cache misses through\n"
            "improved hot code layout.")
    metricgroup = frozenset(['Frontend_Latency'])


class ITLBMisses(MetricBase):
    level = 3
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    desc = ("\n"
            "This metric represents cycles fraction the CPU was stalled\n"
            "due to instruction TLB misses.. Consider large 2M pages for\n"
            "code (selectively prefer hot large-size function, due to\n"
            "limited 2M entries). Linux options: standard binaries use\n"
            "libhugetlbfs; Hfsort.. https://github.com/libhugetlbfs/libhu\n"
            "getlbfs;https://research.fb.com/publications/optimizing-\n"
            "function-placement-for-large-scale-data-center-\n"
            "applications-2/\n")
    metricgroup = frozenset(['Frontend_Latency', 'TLB'])


class BranchResteers(MetricBase):
    level = 3
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    desc = ("\nThis metric represents cycles fraction the CPU was stalled\n"
            "due to Branch Resteers. Branch Resteers estimates the\n"
            "Frontend delay in fetching operations from corrected path;\n"
            "following all sorts of miss-predicted branches. For example;\n"
            "branchy code with lots of miss-predictions might get\n"
            "categorized under Branch Resteers. Note the value of this\n"
            "node may overlap with its siblings.")
    metricgroup = frozenset(['Bad_Speculation', 'Frontend_Latency'])


class MSSwitches(MetricBase):
    level = 3
    name = "MS_Switches"
    domain = "Clocks"
    area = "FE"
    desc = ("\n"
            "This metric estimates the fraction of cycles when the CPU\n"
            "was stalled due to switches of uop delivery to the Microcode\n"
            "Sequencer (MS). Commonly used instructions are optimized for\n"
            "delivery by the DSB (decoded i-cache) or MITE (legacy\n"
            "instruction decode) pipelines. Certain operations cannot be\n"
            "handled natively by the execution pipeline; and must be\n"
            "performed by microcode (small programs injected into the\n"
            "execution stream). Switching to the MS too often can\n"
            "negatively impact performance. The MS is designated to\n"
            "deliver long uop flows required by CISC instructions like\n"
            "CPUID; or uncommon conditions like Floating Point Assists\n"
            "when dealing with Denormals.")
    metricgroup = frozenset(['Frontend_Latency', 'Microcode_Sequencer'])


class IFetchLine(MetricBase):
    name = "IFetchLine"
    domain = "Metric"
    maxval = 1  # fraction metric: capped at 1
    desc = ("\n"
            "This metric represents cycles fraction the fetch stalls\n"
            "due to an instruction cache miss.")
    metricgroup = frozenset([])
7,887
Python
.py
177
36.016949
76
0.642495
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,909
spr_max_server_ratios.py
andikleen_pmu-tools/spr_max_server_ratios.py
# -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel Xeon Scalable Processors 4th gen MAX (code name Sapphire Rapids)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#

# Helpers

# Error-reporting hook; the hosting tool replaces this with a real
# implementation.  The default swallows messages.
print_error = lambda msg: False

# Runtime/topology parameters; overwritten by the hosting tool.
smt_enabled = False
ebs_mode = False
version = "4.8-full-perf"
base_frequency = -1.0
Memory = 1
Average_Frequency = 0.0
num_cores = 1
num_threads = 1
num_sockets = 1
topdown_use_fixed = False


def handle_error(obj, msg):
    """Report msg and put metric node obj into a safe error state.

    Zeroes the value, clears the significance threshold and bumps the
    node's error counter so the failure is visible but non-fatal.
    """
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
    obj.thresh = False


def handle_error_metric(obj, msg):
    """Like handle_error, but for plain metrics that carry no threshold."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0


# Constants

Exe_Ports = 12
Mem_L2_Store_Cost = 10
Mem_STLB_Hit_Cost = 7
MS_Switches_Cost = 3
Avg_Assist_Cost = (99 * 3 + 63 + 30) / 5
Pipeline_Width = 6
OneMillion = 1000000
OneBillion = 1000000000
Energy_Unit = 61
Errata_Whitelist = "ADL038;ADL066"
# NOTE: deliberately overrides the Memory = 1 default above; on this
# model 2 selects the HBM memory configuration.
Memory = 2
PMM_App_Direct = 1 if Memory == 1 else 0
HBM = 1 if Memory > 1 else 0
PERF_METRICS_MSR = 1
FP16 = 1
DS = 1

# Aux.
formulas def Br_DoI_Jumps(self, EV, level): return EV("BR_INST_RETIRED.NEAR_TAKEN", level) - EV("BR_INST_RETIRED.COND_TAKEN", level) - 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) def Branching_Retired(self, EV, level): return (EV("BR_INST_RETIRED.ALL_BRANCHES", level) + 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("INST_RETIRED.NOP", level)) / SLOTS(self, EV, level) def Serialize_Core(self, EV, level): return self.Core_Bound.compute(EV) * (self.Serializing_Operation.compute(EV) + EV("RS.EMPTY:u1", level) / CLKS(self, EV, level) * self.Ports_Utilized_0.compute(EV)) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.AMX_Busy.compute(EV) + self.Divider.compute(EV)) def Umisp(self, EV, level): return 10 * self.Microcode_Sequencer.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV) def Assist(self, EV, level): return (self.Microcode_Sequencer.compute(EV) / (self.Microcode_Sequencer.compute(EV) + self.Few_Uops_Instructions.compute(EV))) * (self.Assists.compute(EV) / self.Microcode_Sequencer.compute(EV)) def Assist_Frontend(self, EV, level): return (1 - EV("INST_RETIRED.REP_ITERATION", level) / EV("UOPS_RETIRED.MS:c1", level)) * (self.Fetch_Latency.compute(EV) * (self.MS_Switches.compute(EV) + self.Branch_Resteers.compute(EV) * (self.Clears_Resteers.compute(EV) + self.Mispredicts_Resteers.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV)) / (self.Clears_Resteers.compute(EV) + self.Unknown_Branches.compute(EV) + self.Mispredicts_Resteers.compute(EV))) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))) def Assist_Retired(self, EV, level): return Assist(self, EV, level) * self.Heavy_Operations.compute(EV) def Core_Bound_Cycles(self, EV, level): return self.Ports_Utilized_0.compute(EV) * CLKS(self, EV, level) + 
Few_Uops_Executed_Threshold(self, EV, level) def DurationTimeInSeconds(self, EV, level): return EV("interval-ms", 0) / 1000 def Execute_Cycles(self, EV, level): return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.THREAD:c1", level) # factor used for metrics associating fixed costs for FB Hits - according to probability theory if all FB Hits come at a random rate in original L1_Miss cost interval then the average cost for each one is 0.5 of the fixed cost def FB_Factor(self, EV, level): return 1 + FBHit_per_L1Miss(self, EV, level) / 2 def FBHit_per_L1Miss(self, EV, level): return EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("MEM_LOAD_RETIRED.L1_MISS", level) def Fetched_Uops(self, EV, level): return EV("UOPS_ISSUED.ANY", level) def Few_Uops_Executed_Threshold(self, EV, level): return EV("EXE_ACTIVITY.1_PORTS_UTIL", level) + self.Retiring.compute(EV) * EV("EXE_ACTIVITY.2_PORTS_UTIL:u0xc", level) # Floating Point computational (arithmetic) Operations Count def FLOP_Count(self, EV, level): return EV("FP_ARITH_INST_RETIRED.SCALAR", level) + 2 * EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + 4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) + 8 * EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level) # Floating Point computational (arithmetic) Operations Count def FP_Arith_Scalar(self, EV, level): EV("FP_ARITH_INST_RETIRED2.SCALAR", level) EV("FP_ARITH_INST_RETIRED.SCALAR", level) return EV("FP_ARITH_INST_RETIRED.SCALAR", level) + EV("FP_ARITH_INST_RETIRED2.SCALAR", level) if FP16 else EV("FP_ARITH_INST_RETIRED.SCALAR", level) # Floating Point computational (arithmetic) Operations Count def FP_Arith_Vector(self, EV, level): EV("FP_ARITH_INST_RETIRED.VECTOR", level) EV("FP_ARITH_INST_RETIRED2.VECTOR", level) return EV("FP_ARITH_INST_RETIRED.VECTOR", level) + EV("FP_ARITH_INST_RETIRED2.VECTOR", level) if FP16 else EV("FP_ARITH_INST_RETIRED.VECTOR", level) def HighIPC(self, EV, level): val = IPC(self, EV, level) / Pipeline_Width 
return val def Light_Ops_Sum(self, EV, level): return self.FP_Arith.compute(EV) + self.Int_Operations.compute(EV) + self.Memory_Operations.compute(EV) + self.Fused_Instructions.compute(EV) + self.Non_Fused_Branches.compute(EV) def LOAD_L3_HIT(self, EV, level): return EV("MEM_LOAD_RETIRED.L3_HIT", level) * FB_Factor(self, EV, level) def LOAD_LCL_MEM(self, EV, level): return EV("MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) def LOAD_LCL_PMM(self, EV, level): EV("MEM_LOAD_RETIRED.LOCAL_PMM", level) return EV("MEM_LOAD_RETIRED.LOCAL_PMM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0 def LOAD_RMT_FWD(self, EV, level): EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", level) return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0 def LOAD_RMT_HITM(self, EV, level): EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", level) return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0 def LOAD_RMT_MEM(self, EV, level): EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", level) return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0 def LOAD_RMT_PMM(self, EV, level): EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", level) return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0 def LOAD_XSNP_HIT(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD", level) + EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", level) * (1 - True_XSNP_HitM_Fraction(self, EV, level)) def LOAD_XSNP_HITM(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", level) * True_XSNP_HitM_Fraction(self, EV, level) def LOAD_XSNP_MISS(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", level) def MEM_Bound_Ratio(self, EV, level): return EV("MEMORY_ACTIVITY.STALLS_L3_MISS", level) / CLKS(self, EV, level) def Mem_DDR_Hit_Fraction(self, EV, level): 
return (19 * LOAD_RMT_MEM(self, EV, level) + 10 *(LOAD_LCL_MEM(self, EV, level) + LOAD_RMT_FWD(self, EV, level) + LOAD_RMT_HITM(self, EV, level))) / ((19 * LOAD_RMT_MEM(self, EV, level) + 10 *(LOAD_LCL_MEM(self, EV, level) + LOAD_RMT_FWD(self, EV, level) + LOAD_RMT_HITM(self, EV, level))) + (25 * LOAD_LCL_PMM(self, EV, level) + 33 * LOAD_RMT_PMM(self, EV, level))) if DS else 1 def Mem_Lock_St_Fraction(self, EV, level): return EV("MEM_INST_RETIRED.LOCK_LOADS", level) / EV("MEM_INST_RETIRED.ALL_STORES", level) def Mispred_Clears_Fraction(self, EV, level): return self.Branch_Mispredicts.compute(EV) / self.Bad_Speculation.compute(EV) def ORO_Demand_RFO_C1(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level ) def ORO_DRD_Any_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level ) def ORO_DRD_BW_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c4", level)) , level ) def Store_L2_Hit_Cycles(self, EV, level): return EV("MEM_STORE_RETIRED.L2_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level)) def True_XSNP_HitM_Fraction(self, EV, level): return EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", level) / (EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", level) + EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", level)) def Mem_XSNP_HitM_Cost(self, EV, level): return 28 * Core_Frequency(self, EV, level) def Mem_XSNP_Hit_Cost(self, EV, level): return 27 * Core_Frequency(self, EV, level) def Mem_XSNP_None_Cost(self, EV, level): return 12 * Core_Frequency(self, EV, level) def Mem_Local_DRAM_Cost(self, EV, level): return 109 * Core_Frequency(self, EV, level) def Mem_Remote_DRAM_Cost(self, EV, level): return 190 * Core_Frequency(self, EV, level) def 
Mem_Remote_HitM_Cost(self, EV, level): return 170 * Core_Frequency(self, EV, level) def Mem_Remote_Fwd_Cost(self, EV, level): return 170 * Core_Frequency(self, EV, level) def Mem_L2_Hit_Cost(self, EV, level): return 3 * Core_Frequency(self, EV, level) def PERF_METRICS_SUM(self, EV, level): return (EV("PERF_METRICS.FRONTEND_BOUND", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.BAD_SPECULATION", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.RETIRING", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.BACKEND_BOUND", level) / EV("TOPDOWN.SLOTS", level)) def Retire_Fraction(self, EV, level): return EV("UOPS_RETIRED.SLOTS", level) / EV("UOPS_ISSUED.ANY", level) # Retired slots per Logical Processor def Retired_Slots(self, EV, level): return self.Retiring.compute(EV) * SLOTS(self, EV, level) # Number of logical processors (enabled or online) on the target system def Num_CPUs(self, EV, level): return num_cores * num_threads if num_cores else(8 + 16 /(2 - smt_enabled)) # A system parameter for dependent-loads (pointer chasing like access pattern) of the workload. 
An integer fraction in range from 0 (no dependent loads) to 100 (all loads are dependent loads) def Dependent_Loads_Weight(self, EV, level): return 20 # Total pipeline cost of Branch Misprediction related bottlenecks def Mispredictions(self, EV, level): val = 100 *(1 - Umisp(self, EV, level)) * (self.Branch_Mispredicts.compute(EV) + self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))) self.thresh = (val > 20) return val # Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses) def Big_Code(self, EV, level): val = 100 * self.Fetch_Latency.compute(EV) * (self.ITLB_Misses.compute(EV) + self.ICache_Misses.compute(EV) + self.Unknown_Branches.compute(EV)) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) self.thresh = (val > 20) return val # Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end) def Instruction_Fetch_BW(self, EV, level): val = 100 *(self.Frontend_Bound.compute(EV) - (1 - Umisp(self, EV, level)) * self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) - Assist_Frontend(self, EV, level)) - Big_Code(self, EV, level) self.thresh = (val > 20) return val # Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks def Cache_Memory_Bandwidth(self, EV, level): val = (100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / 
(self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) * (self.MEM_Bandwidth.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * self.HBM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) * self.MEM_Bandwidth.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) * (self.SQ_Full.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) * (self.FB_Full.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))) self.thresh = (val > 20) return val # Total pipeline cost of external Memory- or Cache-Latency related bottlenecks def Cache_Memory_Latency(self, EV, level): val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) 
+ self.HBM_Bound.compute(EV))) * (self.MEM_Latency.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * self.HBM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) * self.MEM_Latency.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) * (self.L3_Hit_Latency.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * self.L2_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) * (self.Store_Latency.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV))))) self.thresh = (val > 20) return val # Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs) def Memory_Data_TLBs(self, EV, level): val = 100 *(self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / max(self.Memory_Bound.compute(EV) , (self.L1_Bound.compute(EV) + 
self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV)))) * (self.DTLB_Load.compute(EV) / max(self.L1_Bound.compute(EV) , (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) * (self.DTLB_Store.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV))))) self.thresh = (val > 20) return val # Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors) def Memory_Synchronization(self, EV, level): val = 100 *(self.Memory_Bound.compute(EV) * ((self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) * (self.Contested_Accesses.compute(EV) + self.Data_Sharing.compute(EV)) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)) + (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.HBM_Bound.compute(EV))) * self.False_Sharing.compute(EV) / ((self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + 
self.False_Sharing.compute(EV)) - self.Store_Latency.compute(EV))) + self.Machine_Clears.compute(EV) * (1 - self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV)))) self.thresh = (val > 10) return val # Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. def Compute_Bound_Est(self, EV, level): val = 100 *((self.Core_Bound.compute(EV) * self.Divider.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.AMX_Busy.compute(EV) + self.Divider.compute(EV))) + (self.Core_Bound.compute(EV) * self.AMX_Busy.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.AMX_Busy.compute(EV) + self.Divider.compute(EV))) + (self.Core_Bound.compute(EV) * (self.Ports_Utilization.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.AMX_Busy.compute(EV) + self.Divider.compute(EV))) * (self.Ports_Utilized_3m.compute(EV) / (self.Ports_Utilized_0.compute(EV) + self.Ports_Utilized_1.compute(EV) + self.Ports_Utilized_2.compute(EV) + self.Ports_Utilized_3m.compute(EV))))) self.thresh = (val > 20) return val # Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments) def Irregular_Overhead(self, EV, level): val = 100 *(Assist_Frontend(self, EV, level) + Umisp(self, EV, level) * self.Branch_Mispredicts.compute(EV) + (self.Machine_Clears.compute(EV) * self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))) + Serialize_Core(self, EV, level) + Assist_Retired(self, EV, level)) self.thresh = (val > 10) return val # Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls. 
def Other_Bottlenecks(self, EV, level):
    # Residual: 100% minus the sum of all other named Bottlenecks plus Useful_Work.
    val = 100 -(Big_Code(self, EV, level) + Instruction_Fetch_BW(self, EV, level) + Mispredictions(self, EV, level) + Cache_Memory_Bandwidth(self, EV, level) + Cache_Memory_Latency(self, EV, level) + Memory_Data_TLBs(self, EV, level) + Memory_Synchronization(self, EV, level) + Compute_Bound_Est(self, EV, level) + Irregular_Overhead(self, EV, level) + Branching_Overhead(self, EV, level) + Useful_Work(self, EV, level))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound). Consider Loop Unrolling or function inlining optimizations
def Branching_Overhead(self, EV, level):
    val = 100 * Branching_Retired(self, EV, level)
    self.thresh = (val > 5)
    return val

# Total pipeline cost of "useful operations" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.
def Useful_Work(self, EV, level):
    val = 100 *(self.Retiring.compute(EV) - Branching_Retired(self, EV, level) - Assist_Retired(self, EV, level))
    self.thresh = (val > 20)
    return val

# Probability of Core Bound bottleneck hidden by SMT-profiling artifacts. Tip: consider analysis with SMT disabled
def Core_Bound_Likely(self, EV, level):
    # Only meaningful when both siblings were active (> 50% SMT utilization).
    val = 100 *(1 - self.Core_Bound.compute(EV) / self.Ports_Utilization.compute(EV) if self.Core_Bound.compute(EV)< self.Ports_Utilization.compute(EV) else 1) if SMT_2T_Utilization(self, EV, level)> 0.5 else 0
    self.thresh = (val > 0.5)
    return val

# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)

# Uops Per Instruction
def UopPI(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
    self.thresh = (val > 1.05)
    return val

# Uops per taken branch
def UpTB(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 1.5
    return val

# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
    return 1 / IPC(self, EV, level)

# Per-Logical Processor actual clocks when the Logical Processor is active.
def CLKS(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD", level)

# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
    # NOTE(review): both branches of this conditional are identical in
    # this generated file; the conditional is kept for uniformity with
    # other CPU models.
    return EV("TOPDOWN.SLOTS", level) if topdown_use_fixed else EV("TOPDOWN.SLOTS", level)

# Fraction of Physical Core issue-slots utilized by this Logical Processor
def Slots_Utilization(self, EV, level):
    return SLOTS(self, EV, level) / (EV("TOPDOWN.SLOTS:percore", level) / 2) if smt_enabled else 1

# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level):
    executed_uops = EV("UOPS_EXECUTED.THREAD", level)
    issued_uops = EV("UOPS_ISSUED.ANY", level)
    return executed_uops / issued_uops

# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
    retired_insts = EV("INST_RETIRED.ANY", level)
    return retired_insts / CORE_CLKS(self, EV, level)

# Floating Point Operations Per Cycle
def FLOPc(self, EV, level):
    return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level)

# Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to Fused-Multiply Add use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common.
def FP_Arith_Utilization(self, EV, level):
    dispatched = EV("FP_ARITH_DISPATCHED.PORT_0", level)
    dispatched += EV("FP_ARITH_DISPATCHED.PORT_1", level)
    dispatched += EV("FP_ARITH_DISPATCHED.PORT_5", level)
    return dispatched / (2 * CORE_CLKS(self, EV, level))

# Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)
def ILP(self, EV, level):
    executed_uops = EV("UOPS_EXECUTED.THREAD", level)
    active_cycles = EV("UOPS_EXECUTED.THREAD:c1", level)
    return executed_uops / active_cycles

# uops Executed per Cycle
def EPC(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / CLKS(self, EV, level)

# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
    if smt_enabled:
        return EV("CPU_CLK_UNHALTED.DISTRIBUTED", level)
    return CLKS(self, EV, level)

# Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses.
#Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
    per_load = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_LOADS", level)
    self.thresh = per_load < 3
    return per_load

# Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses.
#Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
    per_store = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)
    self.thresh = per_store < 8
    return per_store

# Instructions per Branch (lower number means higher occurrence rate)
def IpBranch(self, EV, level):
    per_branch = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    self.thresh = per_branch < 8
    return per_branch

# Instructions per (near) call (lower number means higher occurrence rate)
def IpCall(self, EV, level):
    per_call = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
    self.thresh = per_call < 200
    return per_call

# Instructions per taken branch
def IpTB(self, EV, level):
    per_taken = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = per_taken < Pipeline_Width * 2 + 1
    return per_taken

# Branch instructions per taken branch. Can be used to approximate PGO-likelihood for non-loopy codes.
def BpTkBranch(self, EV, level):
    all_branches = EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    taken_branches = EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    return all_branches / taken_branches

# Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408
def IpFLOP(self, EV, level):
    per_flop = EV("INST_RETIRED.ANY", level) / FLOP_Count(self, EV, level)
    self.thresh = per_flop < 10
    return per_flop

# Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.
def IpArith(self, EV, level):
    arith_insts = FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level)
    per_arith = EV("INST_RETIRED.ANY", level) / arith_insts
    self.thresh = per_arith < 10
    return per_arith

# Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_HP(self, EV, level): EV("FP_ARITH_INST_RETIRED2.SCALAR", level) EV("INST_RETIRED.ANY", level) val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED2.SCALAR", level) if FP16 else 0 self.thresh = (val < 10) return val # Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. def IpArith_Scalar_SP(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level) self.thresh = (val < 10) return val # Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. def IpArith_Scalar_DP(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level) self.thresh = (val < 10) return val # Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. def IpArith_AVX128(self, EV, level): EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level) EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) EV("INST_RETIRED.ANY", level) EV("FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", level) val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level) + EV("FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", level)) if FP16 else EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level)) self.thresh = (val < 10) return val # Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. 
def IpArith_AVX256(self, EV, level):
    # Unconditional event references keep the events requested even when
    # the FP16 branch below is not taken (generator convention).
    EV("FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", level)
    EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level)
    EV("INST_RETIRED.ANY", level)
    EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)
    packed = EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)
    if FP16:
        packed += EV("FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", level)
    ratio = EV("INST_RETIRED.ANY", level) / packed
    self.thresh = ratio < 10
    return ratio

# Instructions per PAUSE (lower number means higher occurrence rate)
def IpPause(self, EV, level):
    pause_insts = EV("CPU_CLK_UNHALTED.PAUSE_INST", level)
    return Instructions(self, EV, level) / pause_insts

# Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)
def IpSWPF(self, EV, level):
    ratio = EV("INST_RETIRED.ANY", level) / EV("SW_PREFETCH_ACCESS.T0:u0xF", level)
    self.thresh = ratio < 100
    return ratio

# Total number of retired Instructions
def Instructions(self, EV, level):
    return EV("INST_RETIRED.ANY", level)

# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
    retiring_cycles = EV("UOPS_RETIRED.SLOTS:c1", level)
    return Retired_Slots(self, EV, level) / retiring_cycles

# Estimated fraction of retirement-cycles dealing with repeat instructions
def Strings_Cycles(self, EV, level):
    ratio = EV("INST_RETIRED.REP_ITERATION", level) / EV("UOPS_RETIRED.SLOTS:c1", level)
    self.thresh = ratio > 0.1
    return ratio

# Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)
def IpAssist(self, EV, level):
    ratio = EV("INST_RETIRED.ANY", level) / EV("ASSISTS.ANY", level)
    self.thresh = ratio < 100000
    return ratio

# Uops executed per cycle in which at least one uop executed (uses the Execute_Cycles denominator).
def Execute(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level)

# Average number of uops fetched from LSD per cycle
def Fetch_LSD(self, EV, level):
    lsd_uops = EV("LSD.UOPS", level)
    return lsd_uops / EV("LSD.CYCLES_ACTIVE", level)

# Average number of uops fetched from DSB per cycle
def Fetch_DSB(self, EV, level):
    dsb_uops = EV("IDQ.DSB_UOPS", level)
    return dsb_uops / EV("IDQ.DSB_CYCLES_ANY", level)

# Average number of uops fetched from MITE per cycle
def Fetch_MITE(self, EV, level):
    mite_uops = EV("IDQ.MITE_UOPS", level)
    return mite_uops / EV("IDQ.MITE_CYCLES_ANY", level)

# Average number of Uops issued by front-end when it issued something
def Fetch_UpC(self, EV, level):
    return EV("UOPS_ISSUED.ANY", level) / EV("UOPS_ISSUED.ANY:c1", level)

# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    coverage = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    self.thresh = (coverage < 0.7) and HighIPC(self, EV, 1)
    return coverage

# Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.
def Unknown_Branch_Cost(self, EV, level):
    return EV("INT_MISC.UNKNOWN_BRANCH_CYCLES", level) / EV("INT_MISC.UNKNOWN_BRANCH_CYCLES:c1:e1", level)

# Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.
def DSB_Switch_Cost(self, EV, level):
    return EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", level) / EV("DSB2MITE_SWITCHES.PENALTY_CYCLES:c1:e1", level)

# Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Misses(self, EV, level): val = 100 *(self.Fetch_Latency.compute(EV) * self.DSB_Switches.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) + self.Fetch_Bandwidth.compute(EV) * self.MITE.compute(EV) / (self.MITE.compute(EV) + self.DSB.compute(EV))) self.thresh = (val > 10) return val # Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. def DSB_Bandwidth(self, EV, level): val = 100 *(self.Frontend_Bound.compute(EV) * (self.Fetch_Bandwidth.compute(EV) / (self.Fetch_Bandwidth.compute(EV) + self.Fetch_Latency.compute(EV))) * (self.DSB.compute(EV) / (self.MITE.compute(EV) + self.DSB.compute(EV)))) self.thresh = (val > 10) return val # Average Latency for L1 instruction cache misses def ICache_Miss_Latency(self, EV, level): return EV("ICACHE_DATA.STALLS", level) / EV("ICACHE_DATA.STALLS:c1:e1", level) # Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. 
def IC_Misses(self, EV, level): val = 100 *(self.Fetch_Latency.compute(EV) * self.ICache_Misses.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))) self.thresh = (val > 5) return val # Instructions per non-speculative DSB miss (lower number means higher occurrence rate) def IpDSB_Miss_Ret(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("FRONTEND_RETIRED.ANY_DSB_MISS", level) self.thresh = (val < 50) return val # Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate) def IpUnknown_Branch(self, EV, level): return Instructions(self, EV, level) / EV("BACLEARS.ANY", level) # L2 cache true code cacheline misses per kilo instruction def L2MPKI_Code(self, EV, level): return 1000 * EV("FRONTEND_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level) # L2 cache speculative code cacheline misses per kilo instruction def L2MPKI_Code_All(self, EV, level): return 1000 * EV("L2_RQSTS.CODE_RD_MISS", level) / EV("INST_RETIRED.ANY", level) # Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate) def IpMispredict(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) self.thresh = (val < 200) return val # Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate). def IpMisp_Cond_Ntaken(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_NTAKEN", level) self.thresh = (val < 200) return val # Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate). 
def IpMisp_Cond_Taken(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_TAKEN", level)
    self.thresh = (val < 200)
    return val

# Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).
def IpMisp_Ret(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.RET", level)
    self.thresh = (val < 500)
    return val

# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.INDIRECT", level)
    self.thresh = (val < 1000)
    return val

# Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)
def Branch_Misprediction_Cost(self, EV, level):
    return Mispredictions(self, EV, level) * SLOTS(self, EV, level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / 100

# Speculative to Retired ratio of all clears (covering Mispredicts and nukes)
def Spec_Clears_Ratio(self, EV, level):
    return EV("INT_MISC.CLEARS_COUNT", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))

# Fraction of branches that are non-taken conditionals
def Cond_NT(self, EV, level):
    return EV("BR_INST_RETIRED.COND_NTAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are taken conditionals
def Cond_TK(self, EV, level):
    return EV("BR_INST_RETIRED.COND_TAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are CALL or RET
def CallRet(self, EV, level):
    return (EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("BR_INST_RETIRED.NEAR_RETURN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are unconditional (direct or indirect) jumps
def Jump(self, EV, level):
    return Br_DoI_Jumps(self, EV, level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)
def Other_Branches(self, EV, level):
    return 1 -(Cond_NT(self, EV, level) + Cond_TK(self, EV, level) + CallRet(self, EV, level) + Jump(self, EV, level))

# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("MEM_LOAD_COMPLETED.L1_MISS_ANY", level)

# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)

# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)

# L1 cache true misses per kilo instruction for all demand loads (including speculative)
def L1MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.ALL_DEMAND_DATA_RD", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for all request types (including speculative)
def L2MPKI_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for all demand loads (including speculative)
def L2MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level) / EV("INST_RETIRED.ANY", level)

# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("L2_RQSTS.RFO_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache hits per kilo instruction for all request types (including speculative)
def L2HPKI_All(self, EV, level):
    return 1000 *(EV("L2_RQSTS.REFERENCES", level) - EV("L2_RQSTS.MISS", level)) / EV("INST_RETIRED.ANY", level)

# L2 cache hits per kilo instruction for all demand loads (including speculative)
def L2HPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level) / EV("INST_RETIRED.ANY", level)

# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)

# Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)
def FB_HPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("INST_RETIRED.ANY", level)

# L1D fill bandwidth in GB/s (64 bytes per replaced line).
def L1D_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)

# L2 fill bandwidth in GB/s (64 bytes per filled line).
def L2_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)

# L3 fill bandwidth in GB/s (64 bytes per miss).
def L3_Cache_Fill_BW(self, EV, level):
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)

# L3 access bandwidth in GB/s (64 bytes per off-core request).
def L3_Cache_Access_BW(self, EV, level):
    return 64 * EV("OFFCORE_REQUESTS.ALL_REQUESTS", level) / OneBillion / Time(self, EV, level)

# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    val = (EV("ITLB_MISSES.WALK_PENDING", level) + EV("DTLB_LOAD_MISSES.WALK_PENDING", level) + EV("DTLB_STORE_MISSES.WALK_PENDING", level)) / (4 * CORE_CLKS(self, EV, level))
    self.thresh = (val > 0.5)
    return val

# STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Code_STLB_MPKI(self, EV, level):
    return 1000 * EV("ITLB_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Load_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Store_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    return L1D_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    return L2_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    return L3_Cache_Fill_BW(self, EV, level)

# Average per-core data access bandwidth to the L3 cache [GB / sec]
def L3_Cache_Access_BW_2T(self, EV, level):
    return L3_Cache_Access_BW(self, EV, level)

# Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)
def L2_Evictions_Silent_PKI(self, EV, level):
    return 1000 * EV("L2_LINES_OUT.SILENT", level) / Instructions(self, EV, level)

# Rate of non silent evictions from the L2 cache per Kilo instruction
def L2_Evictions_NonSilent_PKI(self, EV, level):
    return 1000 * EV("L2_LINES_OUT.NON_SILENT", level) / Instructions(self, EV, level)

# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)

# Average Latency for L3 cache miss demand Loads
def Load_L3_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD:c1", level)

# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)

# Off-core accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)
def Offcore_Read_Any_PKI(self, EV, level):
    return 1000 * EV("OCR.READS_TO_CORE.ANY_RESPONSE", level) / Instructions(self, EV, level)

# L3 cache misses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)
def Offcore_Read_L3M_PKI(self, EV, level):
    return 1000 * EV("OCR.READS_TO_CORE.L3_MISS", level) / Instructions(self, EV, level)

# High-Bandwidth Memory (HBM) accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)
def Offcore_Read_HBM_PKI(self, EV, level):
    # Unconditional event reference -- presumably keeps the PMM event
    # requested even when the Memory > 2 branch is taken (generator
    # convention); TODO confirm.
    EV("OCR.DEMAND_DATA_RD.PMM", level)
    return Offcore_Read_L3M_PKI(self, EV, level) if Memory > 2 else 1000 * EV("OCR.DEMAND_DATA_RD.PMM", level) / Instructions(self, EV, level)

# Off-core accesses per kilo instruction for modified write requests
def Offcore_MWrite_Any_PKI(self, EV, level):
    return 1000 * EV("OCR.MODIFIED_WRITE.ANY_RESPONSE", level) / Instructions(self, EV, level)

# Un-cacheable retired load per kilo instruction
def UC_Load_PKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_MISC_RETIRED.UC", level) / EV("INST_RETIRED.ANY", level)

# "Bus lock" per kilo instruction
def Bus_Lock_PKI(self, EV, level):
    return 1000 * EV("SQ_MISC.BUS_LOCK", level) / EV("INST_RETIRED.ANY", level)

# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)

# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)

# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)

# Measured Average Uncore Frequency for the SoC [GHz]
def Uncore_Frequency(self, EV, level):
    return Socket_CLKS(self, EV, level) / 1e9 / Time(self, EV, level)

# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)

# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)

# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_DISTRIBUTED", level) if smt_enabled else 0

# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val

# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)

# Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states. Sample code of TPAUSE: https://github.com/torvalds/linux/blob/master/arch/x86/lib/delay.c#L105. If running on Linux, please check the power control interface: https://github.com/torvalds/linux/blob/master/arch/x86/kernel/cpu/umwait.c and https://github.com/torvalds/linux/blob/master/Documentation/ABI/testing/sysfs-devices-system-cpu#L587
def C0_Wait(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.C0_WAIT", level) / CLKS(self, EV, level)
    self.thresh = (val > 0.05)
    return val

# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    return (64 *(EV("UNC_M_CAS_COUNT.RD", level) + EV("UNC_M_CAS_COUNT.WR", level)) / OneBillion) / Time(self, EV, level)

# Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches.
def R2C_Offcore_BW(self, EV, level):
    return 64 * EV("OCR.READS_TO_CORE.ANY_RESPONSE", level) / 1e9 / Time(self, EV, level)

# Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. See R2C_Offcore_BW.
def R2C_L3M_BW(self, EV, level):
    return 64 * EV("OCR.READS_TO_CORE.L3_MISS", level) / 1e9 / Time(self, EV, level)

# Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW.
def R2C_DRAM_BW(self, EV, level):
    return 64 * EV("OCR.READS_TO_CORE.DRAM", level) / 1e9 / Time(self, EV, level)

# Average HBM BW for Reads-to-Core. See R2C_Offcore_BW.
def R2C_HBM_BW(self, EV, level):
    # Unconditional event reference (generator convention).
    EV("OCR.DEMAND_DATA_RD.PMM", level)
    return R2C_DRAM_BW(self, EV, level) if Memory > 2 else 64 * EV("OCR.DEMAND_DATA_RD.PMM", level) / 1e9 / Time(self, EV, level)

# Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. memory-controller only
def MEM_Read_Latency(self, EV, level):
    return (EV("UNC_ARB_TRK_OCCUPANCY.RD", level) + EV("UNC_ARB_DAT_OCCUPANCY.RD", level)) / EV("UNC_ARB_TRK_REQUESTS.RD", level)

# Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches
def MEM_Parallel_Reads(self, EV, level):
    return EV("UNC_ARB_DAT_OCCUPANCY.RD", level) / EV("UNC_ARB_DAT_OCCUPANCY.RD:c1", level)

# Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches
def MEM_PMM_Read_Latency(self, EV, level):
    # Unconditional event references (generator convention).
    EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM", level)
    EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM", level)
    return (OneBillion *(EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM", level) / EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM", level)) / EV("UNC_CHA_CLOCKTICKS:one_unit", level)) if PMM_App_Direct else 0

# Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches
def MEM_DRAM_Read_Latency(self, EV, level):
    return OneBillion *(EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR", level) / EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR", level)) / EV("UNC_CHA_CLOCKTICKS:one_unit", level)

# Average 3DXP Memory Bandwidth Use for reads [GB / sec]
def PMM_Read_BW(self, EV, level):
    return ((64 * EV("UNC_M_PMM_RPQ_INSERTS", level) / OneBillion) / Time(self, EV, level)) if PMM_App_Direct else 0

# Average 3DXP Memory Bandwidth Use for Writes [GB / sec]
def PMM_Write_BW(self, EV, level):
    return ((64 * EV("UNC_M_PMM_WPQ_INSERTS", level) / OneBillion) / Time(self, EV, level)) if PMM_App_Direct else 0

# Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU
def IO_Read_BW(self, EV, level):
    return EV("UNC_CHA_TOR_INSERTS.IO_PCIRDCUR", level) * 64 / OneBillion / Time(self, EV, level)

# Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU
def IO_Write_BW(self, EV, level):
    return (EV("UNC_CHA_TOR_INSERTS.IO_ITOM", level) + EV("UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR", level)) * 64 / OneBillion / Time(self, EV, level)

# Cross-socket Ultra Path Interconnect (UPI) data transmit bandwidth for data only [MB / sec]
def UPI_Data_Transmit_BW(self, EV, level):
    return EV("UNC_UPI_TxL_FLITS.ALL_DATA", level) * 64 / 9 / 1000000

# Run duration time in seconds
def Time(self, EV, level):
    val = EV("interval-s", 0)
    # Flag very short collection intervals (< 1s); derived ratios are
    # likely noisy in that case.
    self.thresh = (val < 1)
    return val

# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    return EV("UNC_CHA_CLOCKTICKS:one_unit", level)

# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = (val < 1000000)
    return val

# Event groups

class Frontend_Bound:
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_4:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    def compute(self, EV):
        try:
            # When topdown_use_fixed is set, use the PERF_METRICS-based
            # formula; otherwise derive from IDQ_BUBBLES.
            self.val = (EV("PERF_METRICS.FRONTEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) - EV("INT_MISC.UOP_DROPPING", 1) / SLOTS(self, EV, 1) if topdown_use_fixed else(EV("IDQ_BUBBLES.CORE", 1) - EV("INT_MISC.UOP_DROPPING", 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the
processor's Frontend undersupplies its Backend. Frontend
denotes the first part of the processor core responsible to
fetch operations that are executed later on by the Backend
part. Within the Frontend; a branch predictor predicts the
next address to fetch; cache-lines are fetched from the
memory subsystem; parsed into instructions; and lastly
decoded into micro-operations (uops). Ideally the Frontend
can issue Pipeline_Width uops every cycle to the Backend.
Frontend Bound denotes unutilized issue-slots when there is
no Backend stall; i.e. bubbles where Frontend delivered no
uops while Backend could have accepted them. For example;
stalls due to instruction-cache misses would be categorized
under Frontend Bound."""

class Fetch_Latency:
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_16:pp', 'FRONTEND_RETIRED.LATENCY_GE_8:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # When topdown_use_fixed is set, use the PERF_METRICS-based
            # formula; otherwise derive from IDQ_BUBBLES.
            self.val = ((EV("PERF_METRICS.FETCH_LATENCY", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) - EV("INT_MISC.UOP_DROPPING", 2) / SLOTS(self, EV, 2)) if topdown_use_fixed else(EV("IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE", 2) * Pipeline_Width - EV("INT_MISC.UOP_DROPPING", 2)) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend latency issues. For example; instruction-
cache misses; iTLB misses or fetch stalls after a branch
misprediction are categorized under Frontend Latency. In
such cases; the Frontend eventually delivers no uops for
some period."""

class ICache_Misses:
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.L2_MISS:pp', 'FRONTEND_RETIRED.L1I_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ICACHE_DATA.STALLS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to instruction cache misses.. Using compiler's
Profile-Guided Optimization (PGO) can reduce i-cache misses
through improved hot code layout."""

class ITLB_Misses:
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.STLB_MISS:pp', 'FRONTEND_RETIRED.ITLB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ICACHE_TAG.STALLS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Instruction TLB (ITLB) misses.. Consider
large 2M pages for code (selectively prefer hot large-size
function, due to limited 2M entries). Linux options:
standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public ations/optimizing-function-placement-for-large-scale-data- center-applications-2/""" class Branch_Resteers: name = "Branch_Resteers" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['BR_MISP_RETIRED.ALL_BRANCHES'] errcount = 0 sibling = None metricgroup = frozenset(['FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("INT_MISC.CLEAR_RESTEER_CYCLES", 3) / CLKS(self, EV, 3) + self.Unknown_Branches.compute(EV) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Branch_Resteers zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings.""" class Mispredicts_Resteers: name = "Mispredicts_Resteers" domain = "Clocks" area = "FE" level = 4 htoff = False sample = ['INT_MISC.CLEAR_RESTEER_CYCLES'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP']) maxval = None def compute(self, EV): try: self.val = Mispred_Clears_Fraction(self, EV, 4) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Mispredicts_Resteers zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage.""" class Clears_Resteers: name = "Clears_Resteers" domain = "Clocks" area = "FE" level = 4 htoff = False sample = ['INT_MISC.CLEAR_RESTEER_CYCLES'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'MachineClears']) maxval = None def 
compute(self, EV): try: self.val = (1 - Mispred_Clears_Fraction(self, EV, 4)) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Clears_Resteers zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears.""" class Unknown_Branches: name = "Unknown_Branches" domain = "Clocks" area = "FE" level = 4 htoff = False sample = ['FRONTEND_RETIRED.UNKNOWN_BRANCH'] errcount = 0 sibling = None metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("INT_MISC.UNKNOWN_BRANCH_CYCLES", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Unknown_Branches zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches""" class MS_Switches: name = "MS_Switches" domain = "Clocks_Estimated" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.MS_FLOWS'] errcount = 0 sibling = None metricgroup = frozenset(['FetchLat', 'MicroSeq']) maxval = 1.0 def compute(self, EV): try: self.val = MS_Switches_Cost * EV("UOPS_RETIRED.MS:c1:e1", 3) / Retire_Fraction(self, EV, 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MS_Switches zero division") return self.val desc = """ This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). 
Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals.""" class LCP: name = "LCP" domain = "Clocks" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("DECODE.LCP", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "LCP zero division") return self.val desc = """ This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this.""" class DSB_Switches: name = "DSB_Switches" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.DSB_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['DSBmiss', 'FetchLat']) maxval = None def compute(self, EV): try: self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DSB_Switches zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). 
Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.. See section 'Optimization for Decoded Icache' in Optimization Manual:. http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html""" class Fetch_Bandwidth: name = "Fetch_Bandwidth" domain = "Slots" area = "FE" level = 2 htoff = False sample = ['FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_2:pp'] errcount = 0 sibling = None metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = max(0 , self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)) self.thresh = (self.val > 0.2) except ZeroDivisionError: handle_error(self, "Fetch_Bandwidth zero division") return self.val desc = """ This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend.""" class MITE: name = "MITE" domain = "Slots_Estimated" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.ANY_DSB_MISS'] errcount = 0 sibling = None metricgroup = frozenset(['DSBmiss', 'FetchBW']) maxval = None def compute(self, EV): try: self.val = (EV("IDQ.MITE_CYCLES_ANY", 3) - EV("IDQ.MITE_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2 self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MITE zero division") return self.val desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. 
For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.. Consider tuning codegen of 'small hotspots' that can fit in DSB. Read about 'Decoded ICache' in Optimization Manual:. http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html""" class Decoder0_Alone: name = "Decoder0_Alone" domain = "Slots_Estimated" area = "FE" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['DSBmiss', 'FetchBW']) maxval = None def compute(self, EV): try: self.val = (EV("INST_DECODED.DECODERS:c1", 4) - EV("INST_DECODED.DECODERS:c2", 4)) / CORE_CLKS(self, EV, 4) / 2 self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Decoder0_Alone zero division") return self.val desc = """ This metric represents fraction of cycles where decoder-0 was the only active decoder""" class DSB: name = "DSB" domain = "Slots_Estimated" area = "FE" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['DSB', 'FetchBW']) maxval = None def compute(self, EV): try: self.val = (EV("IDQ.DSB_CYCLES_ANY", 3) - EV("IDQ.DSB_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2 self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DSB zero division") return self.val desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. 
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.""" class Bad_Speculation: name = "Bad_Speculation" domain = "Slots" area = "BAD" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['TmaL1']) maxval = None def compute(self, EV): try: self.val = max(1 -(self.Frontend_Bound.compute(EV) + self.Backend_Bound.compute(EV) + self.Retiring.compute(EV)) , 0 ) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Bad_Speculation zero division") return self.val desc = """ This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss- predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.""" class Branch_Mispredicts: name = "Branch_Mispredicts" domain = "Slots" area = "BAD" level = 2 htoff = False sample = ['TOPDOWN.BR_MISPREDICT_SLOTS'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = (EV("PERF_METRICS.BRANCH_MISPREDICTS", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("TOPDOWN.BR_MISPREDICT_SLOTS", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Branch_Mispredicts zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.. 
Using profile feedback in the compiler may help. Please see the Optimization Manual for general strategies for addressing branch misprediction issues.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Other_Mispredicts: name = "Other_Mispredicts" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'BrMispredicts']) maxval = None def compute(self, EV): try: self.val = max(self.Branch_Mispredicts.compute(EV) * (1 - EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) / (EV("INT_MISC.CLEARS_COUNT", 3) - EV("MACHINE_CLEARS.COUNT", 3))) , 0.0001 ) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Other_Mispredicts zero division") return self.val desc = """ This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).""" class Machine_Clears: name = "Machine_Clears" domain = "Slots" area = "BAD" level = 2 htoff = False sample = ['MACHINE_CLEARS.COUNT'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = max(0 , self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Machine_Clears zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.. See \"Memory Disambiguation\" in Optimization Manual and:. 
https://software.intel.com/sites/default/files/ m/d/4/1/d/8/sma.pdf""" class Other_Nukes: name = "Other_Nukes" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'Machine_Clears']) maxval = None def compute(self, EV): try: self.val = max(self.Machine_Clears.compute(EV) * (1 - EV("MACHINE_CLEARS.MEMORY_ORDERING", 3) / EV("MACHINE_CLEARS.COUNT", 3)) , 0.0001 ) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Other_Nukes zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.""" class Backend_Bound: name = "Backend_Bound" domain = "Slots" area = "BE" level = 1 htoff = False sample = ['TOPDOWN.BACKEND_BOUND_SLOTS'] errcount = 0 sibling = None metricgroup = frozenset(['BvOB', 'TmaL1']) maxval = None def compute(self, EV): try: self.val = (EV("PERF_METRICS.BACKEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) if topdown_use_fixed else EV("TOPDOWN.BACKEND_BOUND_SLOTS", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.2) except ZeroDivisionError: handle_error(self, "Backend_Bound zero division") return self.val desc = """ This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. 
Backend Bound is further divided into two main categories: Memory Bound and Core Bound.""" class Memory_Bound: name = "Memory_Bound" domain = "Slots" area = "BE/Mem" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Backend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = (EV("PERF_METRICS.MEMORY_BOUND", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("TOPDOWN.MEMORY_BOUND_SLOTS", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Memory_Bound zero division") return self.val desc = """ This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).""" class L1_Bound: name = "L1_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L1_HIT:pp', 'MEM_LOAD_RETIRED.FB_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = max((EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3) - EV("MEMORY_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3) , 0 ) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L1_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. 
However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache.""" class DTLB_Load: name = "DTLB_Load" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.STLB_MISS_LOADS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMT', 'MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = min(Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT:c1", 4) + EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 4) , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("MEMORY_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DTLB_Load zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). 
This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss..""" class Load_STLB_Hit: name = "Load_STLB_Hit" domain = "Clocks_Estimated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = self.DTLB_Load.compute(EV) - self.Load_STLB_Miss.compute(EV) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Load_STLB_Hit zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)""" class Load_STLB_Miss: name = "Load_STLB_Miss" domain = "Clocks_Calculated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 5) / CLKS(self, EV, 5) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Load_STLB_Miss zero division") return self.val desc = """ This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk""" class Store_Fwd_Blk: name = "Store_Fwd_Blk" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = 1.0 def compute(self, EV): try: self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Fwd_Blk zero division") return self.val desc = """ This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. 
To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.""" class L1_Hit_Latency: name = "L1_Hit_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_RETIRED.L1_HIT'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat']) maxval = 1.0 def compute(self, EV): try: self.val = min(2 *(EV("MEM_INST_RETIRED.ALL_LOADS", 4) - EV("MEM_LOAD_RETIRED.FB_HIT", 4) - EV("MEM_LOAD_RETIRED.L1_MISS", 4)) * Dependent_Loads_Weight(self, EV, 4) / 100 , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("MEMORY_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L1_Hit_Latency zero division") return self.val desc = """ This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. 
The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example.""" class Lock_Latency: name = "Lock_Latency" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.LOCK_LOADS'] errcount = 0 sibling = None metricgroup = frozenset(['Offcore']) maxval = 1.0 def compute(self, EV): try: self.val = (16 * max(0 , EV("MEM_INST_RETIRED.LOCK_LOADS", 4) - EV("L2_RQSTS.ALL_RFO", 4)) + Mem_Lock_St_Fraction(self, EV, 4) * (Mem_L2_Store_Cost * EV("L2_RQSTS.RFO_HIT", 4) + ORO_Demand_RFO_C1(self, EV, 4))) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Lock_Latency zero division") return self.val desc = """ This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them.""" class Split_Loads: name = "Split_Loads" domain = "Clocks_Calculated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.SPLIT_LOADS:pp'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = 1.0 def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Split_Loads zero division") return self.val desc = """ This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. . Consider aligning data or hot structure fields. 
See the Optimization Manual for more details""" class FB_Full: name = "FB_Full" domain = "Clocks_Calculated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW']) maxval = None def compute(self, EV): try: self.val = EV("L1D_PEND_MISS.FB_FULL", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.3) except ZeroDivisionError: handle_error(self, "FB_Full zero division") return self.val desc = """ This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory).. See $issueBW and $issueSL hints. Avoid software prefetches if indeed memory BW limited.""" class L2_Bound: name = "L2_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L2_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = (EV("MEMORY_ACTIVITY.STALLS_L1D_MISS", 3) - EV("MEMORY_ACTIVITY.STALLS_L2_MISS", 3)) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L2_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. 
L1 misses/L2 hits) can improve the latency and increase performance.""" class L3_Bound: name = "L3_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L3_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = (EV("MEMORY_ACTIVITY.STALLS_L2_MISS", 3) - EV("MEMORY_ACTIVITY.STALLS_L3_MISS", 3)) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L3_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance.""" class Contested_Accesses: name = "Contested_Accesses" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD', 'MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = ((Mem_XSNP_HitM_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HITM(self, EV, 4) + (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_MISS(self, EV, 4)) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Contested_Accesses zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. 
Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing.""" class Data_Sharing: name = "Data_Sharing" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HIT(self, EV, 4) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Data_Sharing zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance.""" class L3_Hit_Latency: name = "L3_Hit_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_RETIRED.L3_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_XSNP_None_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L3_Hit_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. 
Note the value of this node may overlap with its siblings.""" class SQ_Full: name = "SQ_Full" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore']) maxval = None def compute(self, EV): try: self.val = (EV("XQ.FULL_CYCLES", 4) + EV("L1D_PEND_MISS.L2_STALLS", 4)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.3) and self.parent.thresh except ZeroDivisionError: handle_error(self, "SQ_Full zero division") return self.val desc = """ This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors).""" class HBM_Bound: name = "HBM_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryBound', 'Offcore', 'Server', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = MEM_Bound_Ratio(self, EV, 3) * EV("OCR.DEMAND_DATA_RD.PMM", 3) / EV("OCR.READS_TO_CORE.L3_MISS", 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "HBM_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled due to High Bandwidth Memory (HBM) accesses by loads.""" class DRAM_Bound: name = "DRAM_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L3_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['MemoryBound', 'TmaL3mem']) maxval = 1.0 def compute(self, EV): try: self.val = MEM_Bound_Ratio(self, EV, 3) - self.HBM_Bound.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DRAM_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. 
Better caching can improve the latency and increase performance."""

# MEM_Bandwidth (L4, BE/Mem): fraction of cycles with high outstanding demand-read
# occupancy (bandwidth-limited heuristic), normalized to clocks.
class MEM_Bandwidth:
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that).. Improve data accesses to reduce cacheline transfers from/to memory. Examples: 1) Consume all bytes of a each cacheline before it is evicted (e.g. reorder structure elements and split non-hot ones), 2) merge computed-limited with BW-limited loops, 3) NUMA optimizations in multi-socket system. 
Note: software prefetches will not help BW-limited application.."""

# MBA_Stalls (L5, BE/Mem): cycles throttled by RDT Memory Bandwidth Allocation,
# normalized to clocks.
class MBA_Stalls:
    name = "MBA_Stalls"
    domain = "Clocks"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBW', 'Offcore', 'Server'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("INT_MISC.MBA_STALLS", 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MBA_Stalls zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles where the core's performance was likely hurt due to memory bandwidth Allocation feature (RDT's memory bandwidth throttling)."""

# MEM_Latency (L4, BE/Mem): cycles with any outstanding demand read, minus the
# bandwidth-dominated portion computed by the sibling MEM_Bandwidth node.
class MEM_Latency:
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that).. Improve data accesses or interleave them with compute. 
Examples: 1) Data layout re-structuring, 2) Software Prefetches (also through the compiler).."""

# Local_MEM (L5, BE/Mem): estimated cycles for loads served from local-socket DRAM —
# local-DRAM cost above the L3 baseline, scaled by load count and FB factor.
class Local_MEM:
    name = "Local_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_Local_DRAM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", 5) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Local_MEM zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance."""

# Remote_MEM (L5, BE/Mem): like Local_MEM but for remote-socket DRAM; gated on the
# module-level DS flag (0 when the model lacks the needed data-source events).
class Remote_MEM:
    name = "Remote_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_Remote_DRAM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", 5) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) if DS else 0
            # NOTE(review): bare EV() call likely keeps the event scheduled even when
            # the DS branch above evaluates to 0 — keep statement order.
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_MEM zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. 
This is caused often due to non-optimal NUMA allocations."""

# Remote_Cache (L5, BE/Mem): estimated cycles for loads served from a remote socket's
# cache (HitM plus forwarded lines), each costed above the L3 baseline; gated on DS.
class Remote_Cache:
    name = "Remote_Cache"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM:pp', 'MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore', 'Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = ((Mem_Remote_HitM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", 5) + (Mem_Remote_Fwd_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", 5)) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) if DS else 0
            # NOTE(review): bare EV() calls likely force event collection for the
            # DS=0 path as well — keep statement order.
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", 5)
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_Cache zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. 
This is caused often due to non-optimal NUMA allocations."""

# PMM_Bound (L3, BE/Mem): rough share of memory-bound cycles on persistent memory.
# Applies the non-DDR fraction only when PMM loads are significant (more than one
# per million L1 misses, via the OneMillion scaling); gated on PMM_App_Direct.
class PMM_Bound:
    name = "PMM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'Server', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (((1 - Mem_DDR_Hit_Fraction(self, EV, 3)) * MEM_Bound_Ratio(self, EV, 3)) if (OneMillion *(EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", 3) + EV("MEM_LOAD_RETIRED.LOCAL_PMM", 3))> EV("MEM_LOAD_RETIRED.L1_MISS", 3)) else 0) if PMM_App_Direct else 0
            # NOTE(review): bare EV() calls likely keep these events scheduled on all
            # branches — keep statement order.
            EV("MEM_LOAD_RETIRED.LOCAL_PMM", 3)
            EV("MEM_LOAD_RETIRED.L1_MISS", 3)
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "PMM_Bound zero division")
        return self.val
    desc = """ This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory by loads, PMM stands for Persistent Memory Module. . Consider moving data-structure from AEP to DDR memory for better latency/bandwidth."""

# Store_Bound (L3, BE/Mem): cycles execution was bound on stores, normalized to clocks.
class Store_Bound:
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_INST_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.BOUND_ON_STORES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """ This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. 
This metric will be flagged should RFO stores be a bottleneck."""

# Store_Latency (L4, BE/Mem): estimated cycles handling L1D store misses — L2 store-hit
# cycles plus non-locked demand-RFO occupancy, normalized to clocks.
class Store_Latency:
    name = "Store_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Latency zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Consider to avoid/reduce unnecessary (or easily load-able/computable) memory store."""

# False_Sharing (L4, BE/Mem): rough cycles lost to RFOs that hit a modified line in
# another core's cache (SNOOP_HITM), costed per event and normalized to clocks.
class False_Sharing:
    name = "False_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "False_Sharing zero division")
        return self.val
    desc = """ This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. . 
False Sharing can be easily avoided by padding to make Logical Processors access different lines."""

# Split_Stores (L4, BE/Mem): rate of cache-line-crossing stores per core clock.
class Split_Stores:
    name = "Split_Stores"
    domain = "Core_Utilization"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("MEM_INST_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Stores zero division")
        return self.val
    desc = """ This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity."""

# Streaming_Stores (L4, BE/Mem): estimated cycles on streaming (non-temporal) writes,
# costed 9 clocks per response; gated on the DS flag.
class Streaming_Stores:
    name = "Streaming_Stores"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.STREAMING_WR.ANY_RESPONSE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBW', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 9 * EV("OCR.STREAMING_WR.ANY_RESPONSE", 4) / CLKS(self, EV, 4) if DS else 0
            # NOTE(review): bare EV() call likely keeps the event scheduled when DS is 0.
            EV("OCR.STREAMING_WR.ANY_RESPONSE", 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Streaming_Stores zero division")
        return self.val
    desc = """ This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. 
This metric will be flagged should Streaming stores be a bottleneck."""

# DTLB_Store (L4, BE/Mem): first-level DTLB store-miss cycles — STLB hits costed at
# Mem_STLB_Hit_Cost each, plus active page-walk cycles, per core clock.
class DTLB_Store:
    name = "DTLB_Store"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT:c1", 4) + EV("DTLB_STORE_MISSES.WALK_ACTIVE", 4)) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Store zero division")
        return self.val
    desc = """ This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently- used data."""

# Store_STLB_Hit (L5, BE/Mem): DTLB_Store minus the page-walk portion (Store_STLB_Miss).
class Store_STLB_Hit:
    name = "Store_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = self.DTLB_Store.compute(EV) - self.Store_STLB_Miss.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Hit zero division")
        return self.val
    desc = """ This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second- level TLB (STLB)"""

# Store_STLB_Miss (L5, BE/Mem): cycles with an active store-side page walk per core clock.
class Store_STLB_Miss:
    name = "Store_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("DTLB_STORE_MISSES.WALK_ACTIVE", 5) / CORE_CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Miss zero division")
        return self.val
    desc = """ This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk"""

# Core_Bound (L2, BE/Core): Backend_Bound slots not explained by Memory_Bound,
# clamped at zero.
class Core_Bound:
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """ This metric represents fraction of slots where Core non- memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).. Tip: consider Port Saturation analysis as next step."""

# Divider (L3, BE/Core): cycles the divide/sqrt unit was active, normalized to clocks.
class Divider:
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['ARITH.DIVIDER_ACTIVE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("ARITH.DIV_ACTIVE", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """ This metric represents fraction of cycles where the Divider unit was active. 
Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication."""

# Serializing_Operation (L3, BE/Core): scoreboard-stall cycles per clock plus the
# child C02_WAIT contribution.
class Serializing_Operation:
    name = "Serializing_Operation"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['RESOURCE_STALLS.SCOREBOARD']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("RESOURCE_STALLS.SCOREBOARD", 3) / CLKS(self, EV, 3) + self.C02_WAIT.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Serializing_Operation zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU issue- pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out- of-order execution which may limit performance."""

# Slow_Pause (L4, BE/Core): cycles stalled on PAUSE instructions, normalized to clocks.
class Slow_Pause:
    name = "Slow_Pause"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['CPU_CLK_UNHALTED.PAUSE_INST']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.PAUSE", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Slow_Pause zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions."""

# C01_WAIT (L4, BE/Core): cycles spent in the C0.1 optimized-wait state per clock.
class C01_WAIT:
    name = "C01_WAIT"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['C0Wait'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.C01", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "C01_WAIT zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due staying in C0.1 
power-performance optimized state (Faster wakeup time; Smaller power savings)."""

# C02_WAIT (L4, BE/Core): cycles spent in the C0.2 optimized-wait state per clock.
class C02_WAIT:
    name = "C02_WAIT"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['C0Wait'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.C02", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "C02_WAIT zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings)."""

# Memory_Fence (L4, BE/Core): LFENCE stall estimate — 13 clocks per retired LFENCE.
class Memory_Fence:
    name = "Memory_Fence"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("MISC2_RETIRED.LFENCE", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Fence zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions."""

# AMX_Busy (L3, BE/Core): core-clock fraction the AMX tile engine was busy.
class AMX_Busy:
    name = "AMX_Busy"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'Compute', 'HPC', 'Server'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE.AMX_BUSY", 3) / CORE_CLKS(self, EV, 3)
            self.thresh = (self.val > 0.5) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "AMX_Busy zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles where the Advanced Matrix eXtensions (AMX) execution engine was busy with tile (arithmetic) operations"""

# Ports_Utilization (L3, BE/Core): core-bound cycles per clock, unless divider activity
# dominates the non-load stalls — then fall back to the few-uops-executed estimate.
class Ports_Utilization:
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Core_Bound_Cycles(self, EV, 3) / CLKS(self, EV, 3) if (EV("ARITH.DIV_ACTIVE", 3)<(EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) - EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3))) else Few_Uops_Executed_Threshold(self, EV, 3) / CLKS(self, EV, 3)
            # NOTE(review): bare EV() calls likely keep all branch events scheduled.
            EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3)
            EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3)
            EV("ARITH.DIV_ACTIVE", 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.. Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop."""

# Ports_Utilized_0 (L4, BE/Core): cycles with zero ports executing — 0-port bound plus
# RS-empty (beyond scoreboard) cycles, weighted by the non-load stall fraction.
class Ports_Utilized_0:
    name = "Ports_Utilized_0"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("EXE_ACTIVITY.EXE_BOUND_0_PORTS", 4) + max(EV("RS.EMPTY:u1", 4) - EV("RESOURCE_STALLS.SCOREBOARD", 4) , 0)) / CLKS(self, EV, 4) * (EV("CYCLE_ACTIVITY.STALLS_TOTAL", 4) - EV("EXE_ACTIVITY.BOUND_ON_LOADS", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_0 zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). 
Long-latency instructions like divides may contribute to this metric.. Check assembly view and Appendix C in Optimization Manual to find out instructions with say 5 or more cycles latency.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html"""

# Mixing_Vectors (L5, BE/Core): SSE/AVX transition penalty — 160 clocks per assist.
# Thresholds independently of its parent (no parent.thresh term).
class Mixing_Vectors:
    name = "Mixing_Vectors"
    domain = "Clocks"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 160 * EV("ASSISTS.SSE_AVX_MIX", 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error(self, "Mixing_Vectors zero division")
        return self.val
    desc = """ This metric estimates penalty in terms of percentage of cycles. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic."""

# Ports_Utilized_1 (L4, BE/Core): cycles with exactly one uop executed across all ports.
class Ports_Utilized_1:
    name = "Ports_Utilized_1"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.1_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.1_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_1 zero division")
        return self.val
    desc = """ This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. 
walking a linked list) - looking at the assembly can be helpful."""

# Ports_Utilized_2 (L4, BE/Core): cycles with exactly two uops executed across all ports.
class Ports_Utilized_2:
    name = "Ports_Utilized_2"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.2_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.2_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_2 zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto- Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop."""

# Ports_Utilized_3m (L4, BE/Core): cycles with three or more uops executed per cycle.
class Ports_Utilized_3m:
    name = "Ports_Utilized_3m"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['UOPS_EXECUTED.CYCLES_GE_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_EXECUTED.CYCLES_GE_3", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.4) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_3m zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)."""

# ALU_Op_Utilization (L5, BE/Core): ALU-port dispatch utilization — ports 0/1/5,11/6
# summed, over 5 * core clocks (PORT_5_11 covers two ports). Thresholds independently.
class ALU_Op_Utilization:
    name = "ALU_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_0", 5) + EV("UOPS_DISPATCHED.PORT_1", 5) + EV("UOPS_DISPATCHED.PORT_5_11", 5) + EV("UOPS_DISPATCHED.PORT_6", 5)) / (5 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.4)
        except ZeroDivisionError:
            handle_error(self, "ALU_Op_Utilization zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations."""

# Port_0 (L6, BE/Core): dispatch utilization of port 0 per core clock.
class Port_0:
    name = "Port_0"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_0']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_0", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_0 zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ALU and 2nd branch"""

# Port_1 (L6, BE/Core): dispatch utilization of port 1 per core clock.
class Port_1:
    name = "Port_1"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_1']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_1", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_1 zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)"""

# Port_6 (L6, BE/Core): dispatch utilization of port 6 per core clock.
class Port_6:
    name = "Port_6"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_6']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_6", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_6 zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 Primary Branch and simple ALU"""

# Load_Op_Utilization (L5, BE/Core): load-port dispatch (ports 2/3/10) over 3 * core clocks.
class Load_Op_Utilization:
    name = "Load_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_2_3_10']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_2_3_10", 5) / (3 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Load_Op_Utilization zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations"""

# Store_Op_Utilization (L5, BE/Core): store-port dispatch (ports 4/9 data + 7/8 address)
# over 4 * core clocks.
class Store_Op_Utilization:
    name = "Store_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_7_8']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_4_9", 5) + EV("UOPS_DISPATCHED.PORT_7_8", 5)) / (4 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Store_Op_Utilization zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations"""

# Retiring (L1, RET): top-level retiring slots — fixed PERF_METRICS path when
# topdown_use_fixed, otherwise generic UOPS_RETIRED.SLOTS over total slots.
class Retiring:
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("PERF_METRICS.RETIRING", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) if topdown_use_fixed else EV("UOPS_RETIRED.SLOTS", 1) / SLOTS(self, EV, 1)
            # Also flagged when the Heavy_Operations child crosses its own threshold.
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """ This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. 
Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. . A high Retiring value for non-vectorized code may be a good hint for programmer to consider vectorizing his code. Doing so essentially lets more computations be done without significantly increasing number of instructions thus improving the performance."""

# Light_Operations (L2, RET): Retiring slots minus Heavy_Operations, clamped at zero.
# Thresholds independently of its parent.
class Light_Operations:
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """ This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. .. 
Focus on techniques that reduce instruction count or result in more efficient instructions generation such as vectorization."""

# FP_Arith (L3, RET): sum of x87, scalar-FP and vector-FP children ("Uops" domain —
# may exceed parent per the desc below due to FMA double counting).
class FP_Arith:
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """ This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting."""

# X87_Use (L4, RET): Retiring slots scaled by the x87 share of executed uops.
class X87_Use:
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Retiring.compute(EV) * EV("UOPS_EXECUTED.X87", 4) / EV("UOPS_EXECUTED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """ This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.. 
Tip: consider compiler flags to generate newer AVX (or SSE) instruction sets; which typically perform better and feature vectors."""

# FP_Scalar (L4, RET): scalar-FP arithmetic uops as a fraction of retired slots.
class FP_Scalar:
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """ This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting.. Investigate what limits (compiler) generation of vector code."""

# FP_Vector (L4, RET): vector-FP arithmetic uops (all widths) over retired slots.
class FP_Vector:
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """ This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting.. 
Check if vector width is expected""" class FP_Vector_128b: name = "FP_Vector_128b" domain = "Uops" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5) + EV("FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", 5)) / Retired_Slots(self, EV, 5) if FP16 else(EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5) EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5) EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) EV("FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", 5) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector_128b zero division") return self.val desc = """ This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting.. 
Try to exploit wider vector length""" class FP_Vector_256b: name = "FP_Vector_256b" domain = "Uops" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5) + EV("FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", 5)) / Retired_Slots(self, EV, 5) if FP16 else(EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5) EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5) EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) EV("FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", 5) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector_256b zero division") return self.val desc = """ This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting.. Try to exploit wider vector length""" class Int_Operations: name = "Int_Operations" domain = "Uops" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Pipeline']) maxval = None def compute(self, EV): try: self.val = self.Int_Vector_128b.compute(EV) + self.Int_Vector_256b.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Int_Operations zero division") return self.val desc = """ This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. 
Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.""" class Int_Vector_128b: name = "Int_Vector_128b" domain = "Uops" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'IntVector', 'Pipeline']) maxval = None def compute(self, EV): try: self.val = (EV("INT_VEC_RETIRED.ADD_128", 4) + EV("INT_VEC_RETIRED.VNNI_128", 4)) / Retired_Slots(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Int_Vector_128b zero division") return self.val desc = """ This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired.""" class Int_Vector_256b: name = "Int_Vector_256b" domain = "Uops" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'IntVector', 'Pipeline']) maxval = None def compute(self, EV): try: self.val = (EV("INT_VEC_RETIRED.ADD_256", 4) + EV("INT_VEC_RETIRED.MUL_256", 4) + EV("INT_VEC_RETIRED.VNNI_256", 4)) / Retired_Slots(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Int_Vector_256b zero division") return self.val desc = """ This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired.""" class Memory_Operations: name = "Memory_Operations" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Pipeline']) maxval = None def compute(self, EV): try: self.val = self.Light_Operations.compute(EV) * EV("MEM_UOP_RETIRED.ANY", 3) / Retired_Slots(self, EV, 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Memory_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring 
memory operations -- uops for memory load or store accesses.""" class Fused_Instructions: name = "Fused_Instructions" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline']) maxval = None def compute(self, EV): try: self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.MACRO_FUSED", 3) / Retired_Slots(self, EV, 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Fused_Instructions zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {}. See section 'Optimizing for Macro-fusion' in Optimization Manual:""" class Non_Fused_Branches: name = "Non_Fused_Branches" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline']) maxval = None def compute(self, EV): try: self.val = self.Light_Operations.compute(EV) * (EV("BR_INST_RETIRED.ALL_BRANCHES", 3) - EV("INST_RETIRED.MACRO_FUSED", 3)) / Retired_Slots(self, EV, 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Non_Fused_Branches zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non- conditional branches like direct JMP or CALL would count here. 
Can be used to examine fusible conditional jumps that were not fused.""" class Other_Light_Ops: name = "Other_Light_Ops" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Pipeline']) maxval = None def compute(self, EV): try: self.val = max(0 , self.Light_Operations.compute(EV) - Light_Ops_Sum(self, EV, 3)) self.thresh = (self.val > 0.3) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Other_Light_Ops zero division") return self.val desc = """ This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting""" class Nop_Instructions: name = "Nop_Instructions" domain = "Slots" area = "RET" level = 4 htoff = False sample = ['INST_RETIRED.NOP'] errcount = 0 sibling = None metricgroup = frozenset(['BvBO', 'Pipeline']) maxval = None def compute(self, EV): try: self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.NOP", 4) / Retired_Slots(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Nop_Instructions zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body.. Improve Codegen by correctly placing NOPs outside hot sections (e.g. 
outside loop body).""" class Shuffles_256b: name = "Shuffles_256b" domain = "Slots" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['HPC', 'Pipeline']) maxval = None def compute(self, EV): try: self.val = self.Light_Operations.compute(EV) * EV("INT_VEC_RETIRED.SHUFFLES", 4) / Retired_Slots(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Shuffles_256b zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.""" class Heavy_Operations: name = "Heavy_Operations" domain = "Slots" area = "RET" level = 2 htoff = False sample = ['UOPS_RETIRED.HEAVY'] errcount = 0 sibling = None metricgroup = frozenset(['Retire', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = (EV("PERF_METRICS.HEAVY_OPERATIONS", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("UOPS_RETIRED.HEAVY", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.1) except ZeroDivisionError: handle_error(self, "Heavy_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. 
.""" class Few_Uops_Instructions: name = "Few_Uops_Instructions" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Heavy_Operations.compute(EV) - self.Microcode_Sequencer.compute(EV)) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Few_Uops_Instructions zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to five uops. This highly-correlates with the number of uops in such instructions.""" class Microcode_Sequencer: name = "Microcode_Sequencer" domain = "Slots" area = "RET" level = 3 htoff = False sample = ['UOPS_RETIRED.MS'] errcount = 0 sibling = None metricgroup = frozenset(['MicroSeq']) maxval = None def compute(self, EV): try: self.val = EV("UOPS_RETIRED.MS", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Microcode_Sequencer zero division") return self.val desc = """ This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). 
These cases can often be avoided..""" class Assists: name = "Assists" domain = "Slots_Estimated" area = "RET" level = 4 htoff = False sample = ['ASSISTS.ANY'] errcount = 0 sibling = None metricgroup = frozenset(['BvIO']) maxval = 1.0 def compute(self, EV): try: self.val = Avg_Assist_Cost * EV("ASSISTS.ANY", 4) / SLOTS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Assists zero division") return self.val desc = """ This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases.""" class Page_Faults: name = "Page_Faults" domain = "Slots_Estimated" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = 99 * EV("ASSISTS.PAGE_FAULT", 5) / SLOTS(self, EV, 5) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Page_Faults zero division") return self.val desc = """ This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. 
Note operating system handling of page faults accounts for the majority of its cost.""" class FP_Assists: name = "FP_Assists" domain = "Slots_Estimated" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['HPC']) maxval = None def compute(self, EV): try: self.val = 30 * EV("ASSISTS.FP", 5) / SLOTS(self, EV, 5) self.thresh = (self.val > 0.1) except ZeroDivisionError: handle_error(self, "FP_Assists zero division") return self.val desc = """ This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).. Consider DAZ (Denormals Are Zero) and/or FTZ (Flush To Zero) options in your compiler; \"-ffast-math\" with -O2 in GCC for example. This option may improve performance if the denormal values are not critical in your application. Also note that the DAZ and FTZ modes are not compatible with the IEEE Standard 754.. 
https://www.intel.com/content/www/us/en/develop/docume ntation/vtune-help/top/reference/cpu-metrics-reference/bad- speculation-back-end-bound-pipeline-slots/fp-assists.html""" class AVX_Assists: name = "AVX_Assists" domain = "Slots_Estimated" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['HPC']) maxval = None def compute(self, EV): try: self.val = 63 * EV("ASSISTS.SSE_AVX_MIX", 5) / SLOTS(self, EV, 5) self.thresh = (self.val > 0.1) except ZeroDivisionError: handle_error(self, "AVX_Assists zero division") return self.val desc = """ This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.""" class CISC: name = "CISC" domain = "Slots" area = "RET" level = 4 htoff = False sample = ['FRONTEND_RETIRED.MS_FLOWS'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV)) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "CISC zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. 
Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.""" class Metric_Mispredictions: name = "Mispredictions" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts', 'BvMP']) sibling = None def compute(self, EV): try: self.val = Mispredictions(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Mispredictions zero division") desc = """ Total pipeline cost of Branch Misprediction related bottlenecks""" class Metric_Big_Code: name = "Big_Code" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvBC', 'BigFootprint', 'Fed', 'Frontend', 'IcMiss', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Big_Code(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Big_Code zero division") desc = """ Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)""" class Metric_Instruction_Fetch_BW: name = "Instruction_Fetch_BW" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvFB', 'Fed', 'FetchBW', 'Frontend']) sibling = None def compute(self, EV): try: self.val = Instruction_Fetch_BW(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Instruction_Fetch_BW zero division") desc = """ Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)""" class Metric_Cache_Memory_Bandwidth: name = "Cache_Memory_Bandwidth" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvMB', 'Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Cache_Memory_Bandwidth(self, EV, 0) self.thresh = 
(self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Cache_Memory_Bandwidth zero division") desc = """ Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks""" class Metric_Cache_Memory_Latency: name = "Cache_Memory_Latency" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvML', 'Mem', 'MemoryLat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Cache_Memory_Latency(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Cache_Memory_Latency zero division") desc = """ Total pipeline cost of external Memory- or Cache-Latency related bottlenecks""" class Metric_Memory_Data_TLBs: name = "Memory_Data_TLBs" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvMT', 'Mem', 'MemoryTLB', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Memory_Data_TLBs(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Memory_Data_TLBs zero division") desc = """ Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)""" class Metric_Memory_Synchronization: name = "Memory_Synchronization" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvMS', 'Mem', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Memory_Synchronization(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "Memory_Synchronization zero division") desc = """ Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)""" class Metric_Compute_Bound_Est: name = "Compute_Bound_Est" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvCB', 'Cor']) sibling = None def compute(self, EV): try: self.val = Compute_Bound_Est(self, EV, 0) 
self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Compute_Bound_Est zero division") desc = """ Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy.""" class Metric_Irregular_Overhead: name = "Irregular_Overhead" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['Bad', 'BvIO', 'Cor', 'Ret']) sibling = None def compute(self, EV): try: self.val = Irregular_Overhead(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "Irregular_Overhead zero division") desc = """ Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments)""" class Metric_Other_Bottlenecks: name = "Other_Bottlenecks" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvOB', 'Cor', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Other_Bottlenecks(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Other_Bottlenecks zero division") desc = """ Total pipeline cost of remaining bottlenecks in the back- end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.""" class Metric_Branching_Overhead: name = "Branching_Overhead" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvBO', 'Ret']) sibling = None def compute(self, EV): try: self.val = Branching_Overhead(self, EV, 0) self.thresh = (self.val > 5) except ZeroDivisionError: handle_error_metric(self, "Branching_Overhead zero division") desc = """ Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. 
(A lower bound). Consider Loop Unrolling or function inlining optimizations""" class Metric_Useful_Work: name = "Useful_Work" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvUW', 'Ret']) sibling = None def compute(self, EV): try: self.val = Useful_Work(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Useful_Work zero division") desc = """ Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.""" class Metric_Core_Bound_Likely: name = "Core_Bound_Likely" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Botlnk.L0" metricgroup = frozenset(['Cor', 'SMT']) sibling = None def compute(self, EV): try: self.val = Core_Bound_Likely(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Core_Bound_Likely zero division") desc = """ Probability of Core Bound bottleneck hidden by SMT-profiling artifacts. 
Tip: consider analysis with SMT disabled""" class Metric_IPC: name = "IPC" domain = "Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Ret', 'Summary']) sibling = None def compute(self, EV): try: self.val = IPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IPC zero division") desc = """ Instructions Per Cycle (per Logical Processor)""" class Metric_UopPI: name = "UopPI" domain = "Metric" maxval = 2.0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = UopPI(self, EV, 0) self.thresh = (self.val > 1.05) except ZeroDivisionError: handle_error_metric(self, "UopPI zero division") desc = """ Uops Per Instruction""" class Metric_UpTB: name = "UpTB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = UpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 1.5 except ZeroDivisionError: handle_error_metric(self, "UpTB zero division") desc = """ Uops per taken branch""" class Metric_CPI: name = "CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Mem']) sibling = None def compute(self, EV): try: self.val = CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPI zero division") desc = """ Cycles Per Instruction (per Logical Processor)""" class Metric_CLKS: name = "CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline']) sibling = None def compute(self, EV): try: self.val = CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS zero division") desc = """ Per-Logical Processor actual clocks when the Logical Processor is active.""" class Metric_SLOTS: name = "SLOTS" domain = "Count" maxval = 0 errcount = 0 
area = "Info.Thread" metricgroup = frozenset(['TmaL1']) sibling = None def compute(self, EV): try: self.val = SLOTS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SLOTS zero division") desc = """ Total issue-pipeline slots (per-Physical Core till ICL; per- Logical Processor ICL onward)""" class Metric_Slots_Utilization: name = "Slots_Utilization" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['SMT', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = Slots_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Slots_Utilization zero division") desc = """ Fraction of Physical Core issue-slots utilized by this Logical Processor""" class Metric_Execute_per_Issue: name = "Execute_per_Issue" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Cor', 'Pipeline']) sibling = None def compute(self, EV): try: self.val = Execute_per_Issue(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Execute_per_Issue zero division") desc = """ The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. 
Ratio < 1 suggest high rate of \"execute\" at rename stage.""" class Metric_CoreIPC: name = "CoreIPC" domain = "Core_Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Ret', 'SMT', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = CoreIPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CoreIPC zero division") desc = """ Instructions Per Cycle across hyper-threads (per physical core)""" class Metric_FLOPc: name = "FLOPc" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Ret', 'Flops']) sibling = None def compute(self, EV): try: self.val = FLOPc(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FLOPc zero division") desc = """ Floating Point Operations Per Cycle""" class Metric_FP_Arith_Utilization: name = "FP_Arith_Utilization" domain = "Core_Metric" maxval = 2.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Cor', 'Flops', 'HPC']) sibling = None def compute(self, EV): try: self.val = FP_Arith_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FP_Arith_Utilization zero division") desc = """ Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). 
Values > 1 are possible due to Fused-Multiply Add use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common.""" class Metric_ILP: name = "ILP" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Core" metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil']) sibling = None def compute(self, EV): try: self.val = ILP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ILP zero division") desc = """ Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical- processor)""" class Metric_EPC: name = "EPC" domain = "Metric" maxval = 20.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = EPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "EPC zero division") desc = """ uops Executed per Cycle""" class Metric_CORE_CLKS: name = "CORE_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = CORE_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CORE_CLKS zero division") desc = """ Core actual clocks when any Logical Processor is active on the Physical Core""" class Metric_IpLoad: name = "IpLoad" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpLoad(self, EV, 0) self.thresh = (self.val < 3) except ZeroDivisionError: handle_error_metric(self, "IpLoad zero division") desc = """ Instructions per Load (lower number means higher occurrence rate). 
Tip: reduce memory accesses.""" class Metric_IpStore: name = "IpStore" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpStore(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpStore zero division") desc = """ Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses.""" class Metric_IpBranch: name = "IpBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpBranch(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpBranch zero division") desc = """ Instructions per Branch (lower number means higher occurrence rate)""" class Metric_IpCall: name = "IpCall" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpCall(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpCall zero division") desc = """ Instructions per (near) call (lower number means higher occurrence rate)""" class Metric_IpTB: name = "IpTB" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 2 + 1 except ZeroDivisionError: handle_error_metric(self, "IpTB zero division") desc = """ Instructions per taken branch""" class Metric_BpTkBranch: name = "BpTkBranch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = BpTkBranch(self, EV, 0) self.thresh = 
True except ZeroDivisionError: handle_error_metric(self, "BpTkBranch zero division") desc = """ Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes.""" class Metric_IpFLOP: name = "IpFLOP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpFLOP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpFLOP zero division") desc = """ Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408""" class Metric_IpArith: name = "IpArith" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith zero division") desc = """ Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.""" class Metric_IpArith_Scalar_HP: name = "IpArith_Scalar_HP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpScalar', 'InsType', 'Server']) sibling = None def compute(self, EV): try: self.val = IpArith_Scalar_HP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_Scalar_HP zero division") desc = """ Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). 
Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_Scalar_SP: name = "IpArith_Scalar_SP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpScalar', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_Scalar_SP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_Scalar_SP zero division") desc = """ Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_Scalar_DP: name = "IpArith_Scalar_DP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpScalar', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_Scalar_DP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_Scalar_DP zero division") desc = """ Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_AVX128: name = "IpArith_AVX128" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX128(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX128 zero division") desc = """ Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). 
Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_AVX256: name = "IpArith_AVX256" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX256(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX256 zero division") desc = """ Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpPause: name = "IpPause" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpPause(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpPause zero division") desc = """ Instructions per PAUSE (lower number means higher occurrence rate)""" class Metric_IpSWPF: name = "IpSWPF" domain = "Inst_Metric" maxval = 1000 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Prefetches']) sibling = None def compute(self, EV): try: self.val = IpSWPF(self, EV, 0) self.thresh = (self.val < 100) except ZeroDivisionError: handle_error_metric(self, "IpSWPF zero division") desc = """ Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)""" class Metric_Instructions: name = "Instructions" domain = "Count" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Summary', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = Instructions(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Instructions zero division") desc = """ Total number of retired Instructions""" class Metric_Retire: name = "Retire" domain = "Metric" maxval = 0 errcount = 0 area = 
"Info.Pipeline" metricgroup = frozenset(['Pipeline', 'Ret']) sibling = None def compute(self, EV): try: self.val = Retire(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Retire zero division") desc = """ Average number of Uops retired in cycles where at least one uop has retired.""" class Metric_Strings_Cycles: name = "Strings_Cycles" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret']) sibling = None def compute(self, EV): try: self.val = Strings_Cycles(self, EV, 0) self.thresh = (self.val > 0.1) except ZeroDivisionError: handle_error_metric(self, "Strings_Cycles zero division") desc = """ Estimated fraction of retirement-cycles dealing with repeat instructions""" class Metric_IpAssist: name = "IpAssist" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = IpAssist(self, EV, 0) self.thresh = (self.val < 100000) except ZeroDivisionError: handle_error_metric(self, "IpAssist zero division") desc = """ Instructions per a microcode Assist invocation. 
See Assists tree node for details (lower number means higher occurrence rate)""" class Metric_Execute: name = "Execute" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT']) sibling = None def compute(self, EV): try: self.val = Execute(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Execute zero division") desc = """ """ class Metric_Fetch_LSD: name = "Fetch_LSD" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_LSD(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_LSD zero division") desc = """ Average number of uops fetched from LSD per cycle""" class Metric_Fetch_DSB: name = "Fetch_DSB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_DSB(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_DSB zero division") desc = """ Average number of uops fetched from DSB per cycle""" class Metric_Fetch_MITE: name = "Fetch_MITE" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_MITE(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_MITE zero division") desc = """ Average number of uops fetched from MITE per cycle""" class Metric_Fetch_UpC: name = "Fetch_UpC" domain = "Metric" maxval = 6.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_UpC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_UpC zero division") desc = """ Average number of Uops issued by 
front-end when it issued something""" class Metric_DSB_Coverage: name = "DSB_Coverage" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSB', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Coverage(self, EV, 0) self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1) except ZeroDivisionError: handle_error_metric(self, "DSB_Coverage zero division") desc = """ Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture- and-technology/64-ia-32-architectures-optimization- manual.html""" class Metric_Unknown_Branch_Cost: name = "Unknown_Branch_Cost" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed']) sibling = None def compute(self, EV): try: self.val = Unknown_Branch_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Unknown_Branch_Cost zero division") desc = """ Average number of cycles the front-end was delayed due to an Unknown Branch detection. 
See Unknown_Branches node.""" class Metric_DSB_Switch_Cost: name = "DSB_Switch_Cost" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss']) sibling = None def compute(self, EV): try: self.val = DSB_Switch_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DSB_Switch_Cost zero division") desc = """ Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.""" class Metric_DSB_Misses: name = "DSB_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = DSB_Misses(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "DSB_Misses zero division") desc = """ Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.""" class Metric_DSB_Bandwidth: name = "DSB_Bandwidth" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['DSB', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Bandwidth(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "DSB_Bandwidth zero division") desc = """ Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck.""" class Metric_ICache_Miss_Latency: name = "ICache_Miss_Latency" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = ICache_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ICache_Miss_Latency zero division") desc = """ Average Latency for L1 instruction cache misses""" class Metric_IC_Misses: name = "IC_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" 
metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = IC_Misses(self, EV, 0) self.thresh = (self.val > 5) except ZeroDivisionError: handle_error_metric(self, "IC_Misses zero division") desc = """ Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.""" class Metric_IpDSB_Miss_Ret: name = "IpDSB_Miss_Ret" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = IpDSB_Miss_Ret(self, EV, 0) self.thresh = (self.val < 50) except ZeroDivisionError: handle_error_metric(self, "IpDSB_Miss_Ret zero division") desc = """ Instructions per non-speculative DSB miss (lower number means higher occurrence rate)""" class Metric_IpUnknown_Branch: name = "IpUnknown_Branch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed']) sibling = None def compute(self, EV): try: self.val = IpUnknown_Branch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpUnknown_Branch zero division") desc = """ Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)""" class Metric_L2MPKI_Code: name = "L2MPKI_Code" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code zero division") desc = """ L2 cache true code cacheline misses per kilo instruction""" class Metric_L2MPKI_Code_All: name = "L2MPKI_Code_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code_All 
zero division") desc = """ L2 cache speculative code cacheline misses per kilo instruction""" class Metric_IpMispredict: name = "IpMispredict" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)""" class Metric_IpMisp_Cond_Ntaken: name = "IpMisp_Cond_Ntaken" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Cond_Ntaken(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Cond_Ntaken zero division") desc = """ Instructions per retired Mispredicts for conditional non- taken branches (lower number means higher occurrence rate).""" class Metric_IpMisp_Cond_Taken: name = "IpMisp_Cond_Taken" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Cond_Taken(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Cond_Taken zero division") desc = """ Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).""" class Metric_IpMisp_Ret: name = "IpMisp_Ret" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Ret(self, EV, 0) self.thresh = (self.val < 500) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Ret zero division") desc = """ 
Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).""" class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = (self.val < 1000) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc = """ Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).""" class Metric_Branch_Misprediction_Cost: name = "Branch_Misprediction_Cost" domain = "Core_Metric" maxval = 300 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Branch_Misprediction_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Misprediction_Cost zero division") desc = """ Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)""" class Metric_Spec_Clears_Ratio: name = "Spec_Clears_Ratio" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Spec_Clears_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Spec_Clears_Ratio zero division") desc = """ Speculative to Retired ratio of all clears (covering Mispredicts and nukes)""" class Metric_Cond_NT: name = "Cond_NT" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_NT(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cond_NT zero division") desc = """ Fraction of branches that are non-taken conditionals""" 
# --- Auto-generated toplev metric classes ("Info.Branches" / "Info.Memory" groups) ---
# Every Metric_* class below follows the same generated template:
#   name / domain / maxval / area / metricgroup -- static metadata consumed by toplev
#   errcount -- incremented by handle_error_metric() on evaluation failure
#   sibling  -- filled in elsewhere by toplev when linking related nodes
#   compute(EV) -- evaluates the same-named formula function (defined earlier in
#       this file) with the event-reader EV, storing the result in self.val and
#       the "worth reporting" decision in self.thresh; a ZeroDivisionError
#       (zero/missing event counts) is routed to handle_error_metric, which
#       zeroes self.val and bumps self.errcount.
#   desc -- human-readable description text shown by toplev.
# NOTE(review): file is machine generated; keep edits in sync with the generator.

class Metric_Cond_TK:
    name = "Cond_TK"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO'])
    sibling = None

    def compute(self, EV):
        # Informational fraction metric: always reported (thresh unconditionally True).
        try:
            self.val = Cond_TK(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Cond_TK zero division")
    desc = """
Fraction of branches that are taken conditionals"""

class Metric_CallRet:
    name = "CallRet"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches'])
    sibling = None

    def compute(self, EV):
        # Informational fraction metric: always reported.
        try:
            self.val = CallRet(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CallRet zero division")
    desc = """
Fraction of branches that are CALL or RET"""

class Metric_Jump:
    name = "Jump"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches'])
    sibling = None

    def compute(self, EV):
        # Informational fraction metric: always reported.
        try:
            self.val = Jump(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Jump zero division")
    desc = """
Fraction of branches that are unconditional (direct or indirect) jumps"""

class Metric_Other_Branches:
    name = "Other_Branches"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches'])
    sibling = None

    def compute(self, EV):
        # Catch-all for branch kinds not covered by the other Info.Branches metrics.
        try:
            self.val = Other_Branches(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Other_Branches zero division")
    desc = """
Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)"""

class Metric_Load_Miss_Real_Latency:
    name = "Load_Miss_Real_Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat'])
    sibling = None

    def compute(self, EV):
        # Latency in core clocks; maxval caps display at 1000 cycles.
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_Miss_Real_Latency zero division")
    desc = """
Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)"""

class Metric_MLP:
    name = "MLP"
    domain = "Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW'])
    sibling = None

    def compute(self, EV):
        # Memory-level parallelism; per-logical-processor view.
        try:
            self.val = MLP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MLP zero division")
    desc = """
Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"""

class Metric_L1MPKI:
    name = "L1MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None

    def compute(self, EV):
        # Misses per kilo instruction, retired demand loads only.
        try:
            self.val = L1MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1MPKI zero division")
    desc = """
L1 cache true misses per kilo instruction for retired demand loads"""

class Metric_L1MPKI_Load:
    name = "L1MPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None

    def compute(self, EV):
        # As L1MPKI but counting all demand loads, including speculative ones.
        try:
            self.val = L1MPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1MPKI_Load zero division")
    desc = """
L1 cache true misses per kilo instruction for all demand loads (including speculative)"""

class Metric_L2MPKI:
    name = "L2MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'Backend', 'CacheHits'])
    sibling = None

    def compute(self, EV):
        # Misses per kilo instruction, retired demand loads only.
        try:
            self.val = L2MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI zero division")
    desc = """
L2 cache true misses per kilo instruction for retired demand loads"""

class Metric_L2MPKI_All:
    name = "L2MPKI_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore'])
    sibling = None

    def compute(self, EV):
        # Counts all request types (code/data, demand/prefetch), incl. speculative.
        try:
            self.val = L2MPKI_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_All zero division")
    desc = """
L2 cache true misses per kilo instruction for all request types (including speculative)"""

class Metric_L2MPKI_Load:
    name = "L2MPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None

    def compute(self, EV):
        # Demand loads only, including speculative ones.
        try:
            self.val = L2MPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_Load zero division")
    desc = """
L2 cache true misses per kilo instruction for all demand loads (including speculative)"""

class Metric_L2MPKI_RFO:
    name = "L2MPKI_RFO"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheMisses', 'Offcore'])
    sibling = None

    def compute(self, EV):
        # RFO = Read For Ownership (demand store misses going offcore).
        try:
            self.val = L2MPKI_RFO(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_RFO zero division")
    desc = """
Offcore requests (L2 cache miss) per kilo instruction for demand RFOs"""

class Metric_L2HPKI_All:
    name = "L2HPKI_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None

    def compute(self, EV):
        # Hits (not misses) per kilo instruction, all request types.
        try:
            self.val = L2HPKI_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2HPKI_All zero division")
    desc = """
L2 cache hits per kilo instruction for all request types (including speculative)"""

class Metric_L2HPKI_Load:
    name = "L2HPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None

    def compute(self, EV):
        # Hits per kilo instruction, demand loads only (incl. speculative).
        try:
            self.val = L2HPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2HPKI_Load zero division")
    desc = """
L2 cache hits per kilo instruction for all demand loads (including speculative)"""

class Metric_L3MPKI:
    name = "L3MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem'])
    sibling = None

    def compute(self, EV):
        # Misses per kilo instruction, retired demand loads only.
        try:
            self.val = L3MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3MPKI zero division")
    desc = """
L3 cache true misses per kilo instruction for retired demand loads"""

class Metric_FB_HPKI:
    name = "FB_HPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None

    def compute(self, EV):
        # FB = L1D fill buffer; hits here merge into an in-flight miss.
        try:
            self.val = FB_HPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FB_HPKI zero division")
    desc = """
Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss- handling entries)"""

class Metric_L1D_Cache_Fill_BW:
    name = "L1D_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None

    def compute(self, EV):
        # Generated with an empty desc; see the per-core _2T variant for a description.
        try:
            self.val = L1D_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1D_Cache_Fill_BW zero division")
    desc = """
"""

class Metric_L2_Cache_Fill_BW:
    name = "L2_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None

    def compute(self, EV):
        # Generated with an empty desc; see the per-core _2T variant for a description.
        try:
            self.val = L2_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2_Cache_Fill_BW zero division")
    desc = """
"""

class Metric_L3_Cache_Fill_BW:
    name = "L3_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None

    def compute(self, EV):
        # Generated with an empty desc; see the per-core _2T variant for a description.
        try:
            self.val = L3_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Fill_BW zero division")
    desc = """
"""
class Metric_L3_Cache_Access_BW: name = "L3_Cache_Access_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW zero division") desc = """ """ class Metric_Page_Walks_Utilization: name = "Page_Walks_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Page_Walks_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Page_Walks_Utilization zero division") desc = """ Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses""" class Metric_Code_STLB_MPKI: name = "Code_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Fed', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Code_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Code_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Load_STLB_MPKI: name = "Load_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Load_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Store_STLB_MPKI: name = "Store_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = 
"Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Store_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Store_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_L1D_Cache_Fill_BW_2T: name = "L1D_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L1 data cache [GB / sec]""" class Metric_L2_Cache_Fill_BW_2T: name = "L2_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L2 cache [GB / sec]""" class Metric_L3_Cache_Fill_BW_2T: name = "L3_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L3 cache [GB / sec]""" class Metric_L3_Cache_Access_BW_2T: name = "L3_Cache_Access_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): 
try: self.val = L3_Cache_Access_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW_2T zero division") desc = """ Average per-core data access bandwidth to the L3 cache [GB / sec]""" class Metric_L2_Evictions_Silent_PKI: name = "L2_Evictions_Silent_PKI" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['L2Evicts', 'Mem', 'Server']) sibling = None def compute(self, EV): try: self.val = L2_Evictions_Silent_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Evictions_Silent_PKI zero division") desc = """ Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)""" class Metric_L2_Evictions_NonSilent_PKI: name = "L2_Evictions_NonSilent_PKI" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['L2Evicts', 'Mem', 'Server']) sibling = None def compute(self, EV): try: self.val = L2_Evictions_NonSilent_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Evictions_NonSilent_PKI zero division") desc = """ Rate of non silent evictions from the L2 cache per Kilo instruction""" class Metric_Load_L2_Miss_Latency: name = "Load_L2_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_Miss_Latency zero division") desc = """ Average Latency for L2 cache miss demand Loads""" class Metric_Load_L3_Miss_Latency: name = "Load_L3_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = 
Load_L3_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L3_Miss_Latency zero division") desc = """ Average Latency for L3 cache miss demand Loads""" class Metric_Load_L2_MLP: name = "Load_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_MLP zero division") desc = """ Average Parallel L2 cache miss demand Loads""" class Metric_Data_L2_MLP: name = "Data_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Data_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Data_L2_MLP zero division") desc = """ Average Parallel L2 cache miss data reads""" class Metric_Offcore_Read_Any_PKI: name = "Offcore_Read_Any_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['CacheHits', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Offcore_Read_Any_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Offcore_Read_Any_PKI zero division") desc = """ Off-core accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)""" class Metric_Offcore_Read_L3M_PKI: name = "Offcore_Read_L3M_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Offcore']) sibling = None def compute(self, EV): try: self.val = Offcore_Read_L3M_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Offcore_Read_L3M_PKI zero division") desc = """ L3 cache misses per kilo instruction for reads-to-core requests (speculative; including in-core HW 
prefetches)""" class Metric_Offcore_Read_HBM_PKI: name = "Offcore_Read_HBM_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Offcore', 'Server']) sibling = None def compute(self, EV): try: self.val = Offcore_Read_HBM_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Offcore_Read_HBM_PKI zero division") desc = """ High-Bandwidth Memory (HBM) accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)""" class Metric_Offcore_MWrite_Any_PKI: name = "Offcore_MWrite_Any_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Offcore']) sibling = None def compute(self, EV): try: self.val = Offcore_MWrite_Any_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Offcore_MWrite_Any_PKI zero division") desc = """ Off-core accesses per kilo instruction for modified write requests""" class Metric_UC_Load_PKI: name = "UC_Load_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = UC_Load_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "UC_Load_PKI zero division") desc = """ Un-cacheable retired load per kilo instruction""" class Metric_Bus_Lock_PKI: name = "Bus_Lock_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = Bus_Lock_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Bus_Lock_PKI zero division") desc = """ \"Bus lock\" per kilo instruction""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "Metric" maxval = 1 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'Summary']) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 
0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization (percentage)""" class Metric_CPUs_Utilized: name = "CPUs_Utilized" domain = "Metric" maxval = 300 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = CPUs_Utilized(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPUs_Utilized zero division") desc = """ Average number of utilized CPUs""" class Metric_Core_Frequency: name = "Core_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary', 'Power']) sibling = None def compute(self, EV): try: self.val = Core_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Core_Frequency zero division") desc = """ Measured Average Core Frequency for unhalted processors [GHz]""" class Metric_Uncore_Frequency: name = "Uncore_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Uncore_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Uncore_Frequency zero division") desc = """ Measured Average Uncore Frequency for the SoC [GHz]""" class Metric_GFLOPs: name = "GFLOPs" domain = "Metric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Cor', 'Flops', 'HPC']) sibling = None def compute(self, EV): try: self.val = GFLOPs(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "GFLOPs zero division") desc = """ Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_SMT_2T_Utilization: name = "SMT_2T_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = SMT_2T_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SMT_2T_Utilization zero division") desc = """ Fraction of cycles where both hardware Logical Processors were active""" class Metric_Kernel_Utilization: name = "Kernel_Utilization" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in the Operating System (OS) Kernel mode""" class Metric_Kernel_CPI: name = "Kernel_CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Kernel_CPI zero division") desc = """ Cycles Per Instruction for the Operating System (OS) Kernel mode""" class Metric_C0_Wait: name = "C0_Wait" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['C0Wait']) sibling = None def compute(self, EV): try: 
self.val = C0_Wait(self, EV, 0) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error_metric(self, "C0_Wait zero division") desc = """ Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states. Sample code of TPAUSE: h ttps://github.com/torvalds/linux/blob/master/arch/x86/lib/de lay.c""" class Metric_DRAM_BW_Use: name = "DRAM_BW_Use" domain = "GB/sec" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = DRAM_BW_Use(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DRAM_BW_Use zero division") desc = """ Average external Memory Bandwidth Use for reads and writes [GB / sec]""" class Metric_R2C_Offcore_BW: name = "R2C_Offcore_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.Memory.SoC" metricgroup = frozenset(['HPC', 'Mem', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = R2C_Offcore_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "R2C_Offcore_BW zero division") desc = """ Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches.""" class Metric_R2C_L3M_BW: name = "R2C_L3M_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.Memory.SoC" metricgroup = frozenset(['HPC', 'Mem', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = R2C_L3M_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "R2C_L3M_BW zero division") desc = """ Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. 
See R2C_Offcore_BW.""" class Metric_R2C_DRAM_BW: name = "R2C_DRAM_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.Memory.SoC" metricgroup = frozenset(['HPC', 'Mem', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = R2C_DRAM_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "R2C_DRAM_BW zero division") desc = """ Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW.""" class Metric_R2C_HBM_BW: name = "R2C_HBM_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.Memory.SoC" metricgroup = frozenset(['HPC', 'Mem', 'MemoryBW', 'Server', 'SoC']) sibling = None def compute(self, EV): try: self.val = R2C_HBM_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "R2C_HBM_BW zero division") desc = """ Average HBM BW for Reads-to-Core. See R2C_Offcore_BW.""" class Metric_MEM_Read_Latency: name = "MEM_Read_Latency" domain = "NanoSeconds" maxval = 1000 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryLat', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Read_Latency zero division") desc = """ Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. memory-controller only""" class Metric_MEM_Parallel_Reads: name = "MEM_Parallel_Reads" domain = "SystemMetric" maxval = 100 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Parallel_Reads(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Parallel_Reads zero division") desc = """ Average number of parallel data read requests to external memory. 
Accounts for demand loads and L1/L2 prefetches""" class Metric_MEM_PMM_Read_Latency: name = "MEM_PMM_Read_Latency" domain = "NanoSeconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['MemOffcore', 'MemoryLat', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = MEM_PMM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_PMM_Read_Latency zero division") desc = """ Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches""" class Metric_MEM_DRAM_Read_Latency: name = "MEM_DRAM_Read_Latency" domain = "NanoSeconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['MemOffcore', 'MemoryLat', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = MEM_DRAM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_DRAM_Read_Latency zero division") desc = """ Average latency of data read request to external DRAM memory [in nanoseconds]. 
Accounts for demand loads and L1/L2 data- read prefetches""" class Metric_PMM_Read_BW: name = "PMM_Read_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['MemOffcore', 'MemoryBW', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = PMM_Read_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "PMM_Read_BW zero division") desc = """ Average 3DXP Memory Bandwidth Use for reads [GB / sec]""" class Metric_PMM_Write_BW: name = "PMM_Write_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['MemOffcore', 'MemoryBW', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = PMM_Write_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "PMM_Write_BW zero division") desc = """ Average 3DXP Memory Bandwidth Use for Writes [GB / sec]""" class Metric_IO_Read_BW: name = "IO_Read_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['IoBW', 'MemOffcore', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = IO_Read_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IO_Read_BW zero division") desc = """ Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU""" class Metric_IO_Write_BW: name = "IO_Write_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['IoBW', 'MemOffcore', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = IO_Write_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IO_Write_BW zero division") desc = """ Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. 
Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU""" class Metric_UPI_Data_Transmit_BW: name = "UPI_Data_Transmit_BW" domain = "MB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = UPI_Data_Transmit_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "UPI_Data_Transmit_BW zero division") desc = """ Cross-socket Ultra Path Interconnect (UPI) data transmit bandwidth for data only [MB / sec]""" class Metric_Time: name = "Time" domain = "Seconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = Time(self, EV, 0) self.thresh = (self.val < 1) except ZeroDivisionError: handle_error_metric(self, "Time zero division") desc = """ Run duration time in seconds""" class Metric_Socket_CLKS: name = "Socket_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Socket_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Socket_CLKS zero division") desc = """ Socket actual clocks when any core is active on that socket""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Branches', 'OS']) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) self.thresh = (self.val < 1000000) except ZeroDivisionError: handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]""" # Schedule class Setup: def __init__(self, r): o = dict() n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = 
Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n n = LCP() ; r.run(n) ; o["LCP"] = n n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n n = MITE() ; r.run(n) ; o["MITE"] = n n = Decoder0_Alone() ; r.run(n) ; o["Decoder0_Alone"] = n n = DSB() ; r.run(n) ; o["DSB"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Other_Mispredicts() ; r.run(n) ; o["Other_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Other_Nukes() ; r.run(n) ; o["Other_Nukes"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n n = Load_STLB_Hit() ; r.run(n) ; o["Load_STLB_Hit"] = n n = Load_STLB_Miss() ; r.run(n) ; o["Load_STLB_Miss"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = L1_Hit_Latency() ; r.run(n) ; o["L1_Hit_Latency"] = n n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n n = FB_Full() ; r.run(n) ; o["FB_Full"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n n = HBM_Bound() ; r.run(n) ; o["HBM_Bound"] = n n = 
DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n n = MBA_Stalls() ; r.run(n) ; o["MBA_Stalls"] = n n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n n = Local_MEM() ; r.run(n) ; o["Local_MEM"] = n n = Remote_MEM() ; r.run(n) ; o["Remote_MEM"] = n n = Remote_Cache() ; r.run(n) ; o["Remote_Cache"] = n n = PMM_Bound() ; r.run(n) ; o["PMM_Bound"] = n n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n n = Streaming_Stores() ; r.run(n) ; o["Streaming_Stores"] = n n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n n = Store_STLB_Hit() ; r.run(n) ; o["Store_STLB_Hit"] = n n = Store_STLB_Miss() ; r.run(n) ; o["Store_STLB_Miss"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Divider() ; r.run(n) ; o["Divider"] = n n = Serializing_Operation() ; r.run(n) ; o["Serializing_Operation"] = n n = Slow_Pause() ; r.run(n) ; o["Slow_Pause"] = n n = C01_WAIT() ; r.run(n) ; o["C01_WAIT"] = n n = C02_WAIT() ; r.run(n) ; o["C02_WAIT"] = n n = Memory_Fence() ; r.run(n) ; o["Memory_Fence"] = n n = AMX_Busy() ; r.run(n) ; o["AMX_Busy"] = n n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n n = Mixing_Vectors() ; r.run(n) ; o["Mixing_Vectors"] = n n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n n = Port_0() ; r.run(n) ; o["Port_0"] = n n = Port_1() ; r.run(n) ; o["Port_1"] = n n = Port_6() ; r.run(n) ; o["Port_6"] = n n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n n = Retiring() ; r.run(n) ; 
o["Retiring"] = n n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n n = X87_Use() ; r.run(n) ; o["X87_Use"] = n n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n n = Int_Operations() ; r.run(n) ; o["Int_Operations"] = n n = Int_Vector_128b() ; r.run(n) ; o["Int_Vector_128b"] = n n = Int_Vector_256b() ; r.run(n) ; o["Int_Vector_256b"] = n n = Memory_Operations() ; r.run(n) ; o["Memory_Operations"] = n n = Fused_Instructions() ; r.run(n) ; o["Fused_Instructions"] = n n = Non_Fused_Branches() ; r.run(n) ; o["Non_Fused_Branches"] = n n = Other_Light_Ops() ; r.run(n) ; o["Other_Light_Ops"] = n n = Nop_Instructions() ; r.run(n) ; o["Nop_Instructions"] = n n = Shuffles_256b() ; r.run(n) ; o["Shuffles_256b"] = n n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n n = Few_Uops_Instructions() ; r.run(n) ; o["Few_Uops_Instructions"] = n n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n n = Assists() ; r.run(n) ; o["Assists"] = n n = Page_Faults() ; r.run(n) ; o["Page_Faults"] = n n = FP_Assists() ; r.run(n) ; o["FP_Assists"] = n n = AVX_Assists() ; r.run(n) ; o["AVX_Assists"] = n n = CISC() ; r.run(n) ; o["CISC"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Resteers"].parent = o["Fetch_Latency"] o["Mispredicts_Resteers"].parent = o["Branch_Resteers"] o["Clears_Resteers"].parent = o["Branch_Resteers"] o["Unknown_Branches"].parent = o["Branch_Resteers"] o["MS_Switches"].parent = o["Fetch_Latency"] o["LCP"].parent = o["Fetch_Latency"] o["DSB_Switches"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["MITE"].parent = o["Fetch_Bandwidth"] o["Decoder0_Alone"].parent = o["MITE"] 
o["DSB"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Other_Mispredicts"].parent = o["Branch_Mispredicts"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Other_Nukes"].parent = o["Machine_Clears"] o["Memory_Bound"].parent = o["Backend_Bound"] o["L1_Bound"].parent = o["Memory_Bound"] o["DTLB_Load"].parent = o["L1_Bound"] o["Load_STLB_Hit"].parent = o["DTLB_Load"] o["Load_STLB_Miss"].parent = o["DTLB_Load"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["L1_Hit_Latency"].parent = o["L1_Bound"] o["Lock_Latency"].parent = o["L1_Bound"] o["Split_Loads"].parent = o["L1_Bound"] o["FB_Full"].parent = o["L1_Bound"] o["L2_Bound"].parent = o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["Contested_Accesses"].parent = o["L3_Bound"] o["Data_Sharing"].parent = o["L3_Bound"] o["L3_Hit_Latency"].parent = o["L3_Bound"] o["SQ_Full"].parent = o["L3_Bound"] o["HBM_Bound"].parent = o["Memory_Bound"] o["DRAM_Bound"].parent = o["Memory_Bound"] o["MEM_Bandwidth"].parent = o["DRAM_Bound"] o["MBA_Stalls"].parent = o["MEM_Bandwidth"] o["MEM_Latency"].parent = o["DRAM_Bound"] o["Local_MEM"].parent = o["MEM_Latency"] o["Remote_MEM"].parent = o["MEM_Latency"] o["Remote_Cache"].parent = o["MEM_Latency"] o["PMM_Bound"].parent = o["Memory_Bound"] o["Store_Bound"].parent = o["Memory_Bound"] o["Store_Latency"].parent = o["Store_Bound"] o["False_Sharing"].parent = o["Store_Bound"] o["Split_Stores"].parent = o["Store_Bound"] o["Streaming_Stores"].parent = o["Store_Bound"] o["DTLB_Store"].parent = o["Store_Bound"] o["Store_STLB_Hit"].parent = o["DTLB_Store"] o["Store_STLB_Miss"].parent = o["DTLB_Store"] o["Core_Bound"].parent = o["Backend_Bound"] o["Divider"].parent = o["Core_Bound"] o["Serializing_Operation"].parent = o["Core_Bound"] o["Slow_Pause"].parent = o["Serializing_Operation"] o["C01_WAIT"].parent = o["Serializing_Operation"] o["C02_WAIT"].parent = o["Serializing_Operation"] o["Memory_Fence"].parent = o["Serializing_Operation"] 
o["AMX_Busy"].parent = o["Core_Bound"] o["Ports_Utilization"].parent = o["Core_Bound"] o["Ports_Utilized_0"].parent = o["Ports_Utilization"] o["Mixing_Vectors"].parent = o["Ports_Utilized_0"] o["Ports_Utilized_1"].parent = o["Ports_Utilization"] o["Ports_Utilized_2"].parent = o["Ports_Utilization"] o["Ports_Utilized_3m"].parent = o["Ports_Utilization"] o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_0"].parent = o["ALU_Op_Utilization"] o["Port_1"].parent = o["ALU_Op_Utilization"] o["Port_6"].parent = o["ALU_Op_Utilization"] o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Light_Operations"].parent = o["Retiring"] o["FP_Arith"].parent = o["Light_Operations"] o["X87_Use"].parent = o["FP_Arith"] o["FP_Scalar"].parent = o["FP_Arith"] o["FP_Vector"].parent = o["FP_Arith"] o["FP_Vector_128b"].parent = o["FP_Vector"] o["FP_Vector_256b"].parent = o["FP_Vector"] o["Int_Operations"].parent = o["Light_Operations"] o["Int_Vector_128b"].parent = o["Int_Operations"] o["Int_Vector_256b"].parent = o["Int_Operations"] o["Memory_Operations"].parent = o["Light_Operations"] o["Fused_Instructions"].parent = o["Light_Operations"] o["Non_Fused_Branches"].parent = o["Light_Operations"] o["Other_Light_Ops"].parent = o["Light_Operations"] o["Nop_Instructions"].parent = o["Other_Light_Ops"] o["Shuffles_256b"].parent = o["Other_Light_Ops"] o["Heavy_Operations"].parent = o["Retiring"] o["Few_Uops_Instructions"].parent = o["Heavy_Operations"] o["Microcode_Sequencer"].parent = o["Heavy_Operations"] o["Assists"].parent = o["Microcode_Sequencer"] o["Page_Faults"].parent = o["Assists"] o["FP_Assists"].parent = o["Assists"] o["AVX_Assists"].parent = o["Assists"] o["CISC"].parent = o["Microcode_Sequencer"] # user visible metrics n = Metric_Mispredictions() ; r.metric(n) ; o["Mispredictions"] = n n = Metric_Big_Code() ; r.metric(n) ; o["Big_Code"] = n n = Metric_Instruction_Fetch_BW() ; r.metric(n) ; 
o["Instruction_Fetch_BW"] = n n = Metric_Cache_Memory_Bandwidth() ; r.metric(n) ; o["Cache_Memory_Bandwidth"] = n n = Metric_Cache_Memory_Latency() ; r.metric(n) ; o["Cache_Memory_Latency"] = n n = Metric_Memory_Data_TLBs() ; r.metric(n) ; o["Memory_Data_TLBs"] = n n = Metric_Memory_Synchronization() ; r.metric(n) ; o["Memory_Synchronization"] = n n = Metric_Compute_Bound_Est() ; r.metric(n) ; o["Compute_Bound_Est"] = n n = Metric_Irregular_Overhead() ; r.metric(n) ; o["Irregular_Overhead"] = n n = Metric_Other_Bottlenecks() ; r.metric(n) ; o["Other_Bottlenecks"] = n n = Metric_Branching_Overhead() ; r.metric(n) ; o["Branching_Overhead"] = n n = Metric_Useful_Work() ; r.metric(n) ; o["Useful_Work"] = n n = Metric_Core_Bound_Likely() ; r.metric(n) ; o["Core_Bound_Likely"] = n n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_Slots_Utilization() ; r.metric(n) ; o["Slots_Utilization"] = n n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n n = Metric_EPC() ; r.metric(n) ; o["EPC"] = n n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n n = 
Metric_IpArith_Scalar_HP() ; r.metric(n) ; o["IpArith_Scalar_HP"] = n n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n n = Metric_IpPause() ; r.metric(n) ; o["IpPause"] = n n = Metric_IpSWPF() ; r.metric(n) ; o["IpSWPF"] = n n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n n = Metric_Strings_Cycles() ; r.metric(n) ; o["Strings_Cycles"] = n n = Metric_IpAssist() ; r.metric(n) ; o["IpAssist"] = n n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n n = Metric_Fetch_LSD() ; r.metric(n) ; o["Fetch_LSD"] = n n = Metric_Fetch_DSB() ; r.metric(n) ; o["Fetch_DSB"] = n n = Metric_Fetch_MITE() ; r.metric(n) ; o["Fetch_MITE"] = n n = Metric_Fetch_UpC() ; r.metric(n) ; o["Fetch_UpC"] = n n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n n = Metric_Unknown_Branch_Cost() ; r.metric(n) ; o["Unknown_Branch_Cost"] = n n = Metric_DSB_Switch_Cost() ; r.metric(n) ; o["DSB_Switch_Cost"] = n n = Metric_DSB_Misses() ; r.metric(n) ; o["DSB_Misses"] = n n = Metric_DSB_Bandwidth() ; r.metric(n) ; o["DSB_Bandwidth"] = n n = Metric_ICache_Miss_Latency() ; r.metric(n) ; o["ICache_Miss_Latency"] = n n = Metric_IC_Misses() ; r.metric(n) ; o["IC_Misses"] = n n = Metric_IpDSB_Miss_Ret() ; r.metric(n) ; o["IpDSB_Miss_Ret"] = n n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n n = Metric_L2MPKI_Code() ; r.metric(n) ; o["L2MPKI_Code"] = n n = Metric_L2MPKI_Code_All() ; r.metric(n) ; o["L2MPKI_Code_All"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Cond_Ntaken() ; r.metric(n) ; o["IpMisp_Cond_Ntaken"] = n n = Metric_IpMisp_Cond_Taken() ; r.metric(n) ; o["IpMisp_Cond_Taken"] = n n = Metric_IpMisp_Ret() ; r.metric(n) ; 
o["IpMisp_Ret"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_Branch_Misprediction_Cost() ; r.metric(n) ; o["Branch_Misprediction_Cost"] = n n = Metric_Spec_Clears_Ratio() ; r.metric(n) ; o["Spec_Clears_Ratio"] = n n = Metric_Cond_NT() ; r.metric(n) ; o["Cond_NT"] = n n = Metric_Cond_TK() ; r.metric(n) ; o["Cond_TK"] = n n = Metric_CallRet() ; r.metric(n) ; o["CallRet"] = n n = Metric_Jump() ; r.metric(n) ; o["Jump"] = n n = Metric_Other_Branches() ; r.metric(n) ; o["Other_Branches"] = n n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n n = Metric_L1MPKI_Load() ; r.metric(n) ; o["L1MPKI_Load"] = n n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n n = Metric_L2HPKI_All() ; r.metric(n) ; o["L2HPKI_All"] = n n = Metric_L2HPKI_Load() ; r.metric(n) ; o["L2HPKI_Load"] = n n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n n = Metric_FB_HPKI() ; r.metric(n) ; o["FB_HPKI"] = n n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n n = Metric_L3_Cache_Access_BW() ; r.metric(n) ; o["L3_Cache_Access_BW"] = n n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n n = Metric_Code_STLB_MPKI() ; r.metric(n) ; o["Code_STLB_MPKI"] = n n = Metric_Load_STLB_MPKI() ; r.metric(n) ; o["Load_STLB_MPKI"] = n n = Metric_Store_STLB_MPKI() ; r.metric(n) ; o["Store_STLB_MPKI"] = n n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n n = 
Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Access_BW_2T() ; r.metric(n) ; o["L3_Cache_Access_BW_2T"] = n n = Metric_L2_Evictions_Silent_PKI() ; r.metric(n) ; o["L2_Evictions_Silent_PKI"] = n n = Metric_L2_Evictions_NonSilent_PKI() ; r.metric(n) ; o["L2_Evictions_NonSilent_PKI"] = n n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n n = Metric_Load_L3_Miss_Latency() ; r.metric(n) ; o["Load_L3_Miss_Latency"] = n n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n n = Metric_Offcore_Read_Any_PKI() ; r.metric(n) ; o["Offcore_Read_Any_PKI"] = n n = Metric_Offcore_Read_L3M_PKI() ; r.metric(n) ; o["Offcore_Read_L3M_PKI"] = n n = Metric_Offcore_Read_HBM_PKI() ; r.metric(n) ; o["Offcore_Read_HBM_PKI"] = n n = Metric_Offcore_MWrite_Any_PKI() ; r.metric(n) ; o["Offcore_MWrite_Any_PKI"] = n n = Metric_UC_Load_PKI() ; r.metric(n) ; o["UC_Load_PKI"] = n n = Metric_Bus_Lock_PKI() ; r.metric(n) ; o["Bus_Lock_PKI"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n n = Metric_Uncore_Frequency() ; r.metric(n) ; o["Uncore_Frequency"] = n n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n n = Metric_C0_Wait() ; r.metric(n) ; o["C0_Wait"] = n n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n n = Metric_R2C_Offcore_BW() ; r.metric(n) ; o["R2C_Offcore_BW"] = n n = Metric_R2C_L3M_BW() ; r.metric(n) ; o["R2C_L3M_BW"] = n n = Metric_R2C_DRAM_BW() ; r.metric(n) ; o["R2C_DRAM_BW"] = n n = 
Metric_R2C_HBM_BW() ; r.metric(n) ; o["R2C_HBM_BW"] = n n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n n = Metric_MEM_PMM_Read_Latency() ; r.metric(n) ; o["MEM_PMM_Read_Latency"] = n n = Metric_MEM_DRAM_Read_Latency() ; r.metric(n) ; o["MEM_DRAM_Read_Latency"] = n n = Metric_PMM_Read_BW() ; r.metric(n) ; o["PMM_Read_BW"] = n n = Metric_PMM_Write_BW() ; r.metric(n) ; o["PMM_Write_BW"] = n n = Metric_IO_Read_BW() ; r.metric(n) ; o["IO_Read_BW"] = n n = Metric_IO_Write_BW() ; r.metric(n) ; o["IO_Write_BW"] = n n = Metric_UPI_Data_Transmit_BW() ; r.metric(n) ; o["UPI_Data_Transmit_BW"] = n n = Metric_Time() ; r.metric(n) ; o["Time"] = n n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n # references between groups o["Branch_Resteers"].Unknown_Branches = o["Unknown_Branches"] o["Mispredicts_Resteers"].Retiring = o["Retiring"] o["Mispredicts_Resteers"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Mispredicts_Resteers"].Bad_Speculation = o["Bad_Speculation"] o["Mispredicts_Resteers"].Frontend_Bound = o["Frontend_Bound"] o["Mispredicts_Resteers"].Backend_Bound = o["Backend_Bound"] o["Clears_Resteers"].Retiring = o["Retiring"] o["Clears_Resteers"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Clears_Resteers"].Bad_Speculation = o["Bad_Speculation"] o["Clears_Resteers"].Frontend_Bound = o["Frontend_Bound"] o["Clears_Resteers"].Backend_Bound = o["Backend_Bound"] o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["Bad_Speculation"].Retiring = o["Retiring"] o["Bad_Speculation"].Frontend_Bound = o["Frontend_Bound"] o["Bad_Speculation"].Backend_Bound = o["Backend_Bound"] o["Other_Mispredicts"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Machine_Clears"].Retiring = o["Retiring"] o["Machine_Clears"].Branch_Mispredicts = 
o["Branch_Mispredicts"] o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Frontend_Bound = o["Frontend_Bound"] o["Machine_Clears"].Backend_Bound = o["Backend_Bound"] o["Other_Nukes"].Machine_Clears = o["Machine_Clears"] o["Other_Nukes"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Nukes"].Retiring = o["Retiring"] o["Other_Nukes"].Backend_Bound = o["Backend_Bound"] o["Other_Nukes"].Bad_Speculation = o["Bad_Speculation"] o["Other_Nukes"].Frontend_Bound = o["Frontend_Bound"] o["Load_STLB_Hit"].Load_STLB_Miss = o["Load_STLB_Miss"] o["Load_STLB_Hit"].DTLB_Load = o["DTLB_Load"] o["DRAM_Bound"].HBM_Bound = o["HBM_Bound"] o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Store_STLB_Hit"].DTLB_Store = o["DTLB_Store"] o["Store_STLB_Hit"].Store_STLB_Miss = o["Store_STLB_Miss"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Serializing_Operation"].C02_WAIT = o["C02_WAIT"] o["Ports_Utilization"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Ports_Utilization"].Retiring = o["Retiring"] o["Retiring"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Retiring = o["Retiring"] o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"] o["FP_Arith"].Retiring = o["Retiring"] o["FP_Arith"].FP_Scalar = o["FP_Scalar"] o["FP_Arith"].X87_Use = o["X87_Use"] o["FP_Arith"].FP_Vector = o["FP_Vector"] o["X87_Use"].Retiring = o["Retiring"] o["FP_Scalar"].Retiring = o["Retiring"] o["FP_Vector"].Retiring = o["Retiring"] o["FP_Vector_128b"].Retiring = o["Retiring"] o["FP_Vector_256b"].Retiring = o["Retiring"] o["Int_Operations"].Retiring = o["Retiring"] o["Int_Operations"].Int_Vector_256b = o["Int_Vector_256b"] o["Int_Operations"].Int_Vector_128b = o["Int_Vector_128b"] o["Int_Vector_128b"].Retiring = o["Retiring"] o["Int_Vector_256b"].Retiring = o["Retiring"] o["Memory_Operations"].Retiring = o["Retiring"] o["Memory_Operations"].Light_Operations = o["Light_Operations"] 
o["Memory_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Fused_Instructions"].Retiring = o["Retiring"] o["Fused_Instructions"].Light_Operations = o["Light_Operations"] o["Fused_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Non_Fused_Branches"].Retiring = o["Retiring"] o["Non_Fused_Branches"].Light_Operations = o["Light_Operations"] o["Non_Fused_Branches"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].Light_Operations = o["Light_Operations"] o["Other_Light_Ops"].Retiring = o["Retiring"] o["Other_Light_Ops"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].Int_Operations = o["Int_Operations"] o["Other_Light_Ops"].Non_Fused_Branches = o["Non_Fused_Branches"] o["Other_Light_Ops"].FP_Arith = o["FP_Arith"] o["Other_Light_Ops"].Fused_Instructions = o["Fused_Instructions"] o["Other_Light_Ops"].Int_Vector_128b = o["Int_Vector_128b"] o["Other_Light_Ops"].FP_Vector = o["FP_Vector"] o["Other_Light_Ops"].FP_Scalar = o["FP_Scalar"] o["Other_Light_Ops"].X87_Use = o["X87_Use"] o["Other_Light_Ops"].Int_Vector_256b = o["Int_Vector_256b"] o["Other_Light_Ops"].Memory_Operations = o["Memory_Operations"] o["Nop_Instructions"].Retiring = o["Retiring"] o["Nop_Instructions"].Light_Operations = o["Light_Operations"] o["Nop_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Shuffles_256b"].Retiring = o["Retiring"] o["Shuffles_256b"].Light_Operations = o["Light_Operations"] o["Shuffles_256b"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Assists = o["Assists"] o["Mispredictions"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Mispredictions"].LCP = o["LCP"] o["Mispredictions"].Retiring = o["Retiring"] o["Mispredictions"].Other_Mispredicts = o["Other_Mispredicts"] o["Mispredictions"].Microcode_Sequencer = o["Microcode_Sequencer"] 
o["Mispredictions"].Frontend_Bound = o["Frontend_Bound"] o["Mispredictions"].DSB_Switches = o["DSB_Switches"] o["Mispredictions"].Backend_Bound = o["Backend_Bound"] o["Mispredictions"].Branch_Resteers = o["Branch_Resteers"] o["Mispredictions"].ICache_Misses = o["ICache_Misses"] o["Mispredictions"].MS_Switches = o["MS_Switches"] o["Mispredictions"].Bad_Speculation = o["Bad_Speculation"] o["Mispredictions"].ITLB_Misses = o["ITLB_Misses"] o["Mispredictions"].Unknown_Branches = o["Unknown_Branches"] o["Mispredictions"].Fetch_Latency = o["Fetch_Latency"] o["Mispredictions"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Big_Code"].LCP = o["LCP"] o["Big_Code"].ICache_Misses = o["ICache_Misses"] o["Big_Code"].DSB_Switches = o["DSB_Switches"] o["Big_Code"].Branch_Resteers = o["Branch_Resteers"] o["Big_Code"].MS_Switches = o["MS_Switches"] o["Big_Code"].ITLB_Misses = o["ITLB_Misses"] o["Big_Code"].Unknown_Branches = o["Unknown_Branches"] o["Big_Code"].Fetch_Latency = o["Fetch_Latency"] o["Instruction_Fetch_BW"].Retiring = o["Retiring"] o["Instruction_Fetch_BW"].Other_Mispredicts = o["Other_Mispredicts"] o["Instruction_Fetch_BW"].DSB_Switches = o["DSB_Switches"] o["Instruction_Fetch_BW"].Backend_Bound = o["Backend_Bound"] o["Instruction_Fetch_BW"].Branch_Resteers = o["Branch_Resteers"] o["Instruction_Fetch_BW"].Fetch_Latency = o["Fetch_Latency"] o["Instruction_Fetch_BW"].ICache_Misses = o["ICache_Misses"] o["Instruction_Fetch_BW"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Instruction_Fetch_BW"].Frontend_Bound = o["Frontend_Bound"] o["Instruction_Fetch_BW"].Bad_Speculation = o["Bad_Speculation"] o["Instruction_Fetch_BW"].ITLB_Misses = o["ITLB_Misses"] o["Instruction_Fetch_BW"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Instruction_Fetch_BW"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Instruction_Fetch_BW"].LCP = o["LCP"] o["Instruction_Fetch_BW"].Clears_Resteers = o["Clears_Resteers"] o["Instruction_Fetch_BW"].MS_Switches = o["MS_Switches"] 
o["Instruction_Fetch_BW"].Unknown_Branches = o["Unknown_Branches"] o["Cache_Memory_Bandwidth"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Bandwidth"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Cache_Memory_Bandwidth"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Bandwidth"].FB_Full = o["FB_Full"] o["Cache_Memory_Bandwidth"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Bandwidth"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Bandwidth"].PMM_Bound = o["PMM_Bound"] o["Cache_Memory_Bandwidth"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Bandwidth"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Bandwidth"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Bandwidth"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Bandwidth"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Bandwidth"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Bandwidth"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Bandwidth"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Bandwidth"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Bandwidth"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Bandwidth"].DRAM_Bound = o["DRAM_Bound"] o["Cache_Memory_Bandwidth"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Bandwidth"].HBM_Bound = o["HBM_Bound"] o["Cache_Memory_Latency"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Latency"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Latency"].Store_Latency = o["Store_Latency"] o["Cache_Memory_Latency"].Split_Stores = o["Split_Stores"] o["Cache_Memory_Latency"].PMM_Bound = o["PMM_Bound"] o["Cache_Memory_Latency"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Latency"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Latency"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Latency"].DTLB_Store = o["DTLB_Store"] o["Cache_Memory_Latency"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Latency"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Latency"].L3_Hit_Latency = o["L3_Hit_Latency"] 
o["Cache_Memory_Latency"].False_Sharing = o["False_Sharing"] o["Cache_Memory_Latency"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Latency"].DRAM_Bound = o["DRAM_Bound"] o["Cache_Memory_Latency"].Streaming_Stores = o["Streaming_Stores"] o["Cache_Memory_Latency"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Latency"].HBM_Bound = o["HBM_Bound"] o["Memory_Data_TLBs"].L1_Bound = o["L1_Bound"] o["Memory_Data_TLBs"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Memory_Data_TLBs"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Memory_Data_TLBs"].FB_Full = o["FB_Full"] o["Memory_Data_TLBs"].DTLB_Load = o["DTLB_Load"] o["Memory_Data_TLBs"].Store_Latency = o["Store_Latency"] o["Memory_Data_TLBs"].Split_Stores = o["Split_Stores"] o["Memory_Data_TLBs"].PMM_Bound = o["PMM_Bound"] o["Memory_Data_TLBs"].DTLB_Store = o["DTLB_Store"] o["Memory_Data_TLBs"].L2_Bound = o["L2_Bound"] o["Memory_Data_TLBs"].Memory_Bound = o["Memory_Bound"] o["Memory_Data_TLBs"].Lock_Latency = o["Lock_Latency"] o["Memory_Data_TLBs"].Store_Bound = o["Store_Bound"] o["Memory_Data_TLBs"].False_Sharing = o["False_Sharing"] o["Memory_Data_TLBs"].Split_Loads = o["Split_Loads"] o["Memory_Data_TLBs"].L3_Bound = o["L3_Bound"] o["Memory_Data_TLBs"].HBM_Bound = o["HBM_Bound"] o["Memory_Data_TLBs"].Streaming_Stores = o["Streaming_Stores"] o["Memory_Data_TLBs"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Synchronization"].L1_Bound = o["L1_Bound"] o["Memory_Synchronization"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Synchronization"].False_Sharing = o["False_Sharing"] o["Memory_Synchronization"].Retiring = o["Retiring"] o["Memory_Synchronization"].PMM_Bound = o["PMM_Bound"] o["Memory_Synchronization"].Bad_Speculation = o["Bad_Speculation"] o["Memory_Synchronization"].Machine_Clears = o["Machine_Clears"] o["Memory_Synchronization"].Data_Sharing = o["Data_Sharing"] o["Memory_Synchronization"].Memory_Bound = o["Memory_Bound"] o["Memory_Synchronization"].SQ_Full = o["SQ_Full"] o["Memory_Synchronization"].Store_Bound = 
o["Store_Bound"] o["Memory_Synchronization"].L3_Bound = o["L3_Bound"] o["Memory_Synchronization"].L2_Bound = o["L2_Bound"] o["Memory_Synchronization"].Streaming_Stores = o["Streaming_Stores"] o["Memory_Synchronization"].Contested_Accesses = o["Contested_Accesses"] o["Memory_Synchronization"].DTLB_Store = o["DTLB_Store"] o["Memory_Synchronization"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Memory_Synchronization"].Store_Latency = o["Store_Latency"] o["Memory_Synchronization"].Split_Stores = o["Split_Stores"] o["Memory_Synchronization"].Backend_Bound = o["Backend_Bound"] o["Memory_Synchronization"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Memory_Synchronization"].Other_Nukes = o["Other_Nukes"] o["Memory_Synchronization"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Synchronization"].HBM_Bound = o["HBM_Bound"] o["Compute_Bound_Est"].Serializing_Operation = o["Serializing_Operation"] o["Compute_Bound_Est"].Ports_Utilization = o["Ports_Utilization"] o["Compute_Bound_Est"].C02_WAIT = o["C02_WAIT"] o["Compute_Bound_Est"].Retiring = o["Retiring"] o["Compute_Bound_Est"].AMX_Busy = o["AMX_Busy"] o["Compute_Bound_Est"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Compute_Bound_Est"].Memory_Bound = o["Memory_Bound"] o["Compute_Bound_Est"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Compute_Bound_Est"].Core_Bound = o["Core_Bound"] o["Compute_Bound_Est"].Backend_Bound = o["Backend_Bound"] o["Compute_Bound_Est"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Compute_Bound_Est"].Divider = o["Divider"] o["Compute_Bound_Est"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Heavy_Operations = o["Heavy_Operations"] o["Irregular_Overhead"].Ports_Utilization = o["Ports_Utilization"] o["Irregular_Overhead"].C02_WAIT = o["C02_WAIT"] o["Irregular_Overhead"].Retiring = o["Retiring"] o["Irregular_Overhead"].ICache_Misses = o["ICache_Misses"] o["Irregular_Overhead"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Irregular_Overhead"].Frontend_Bound = o["Frontend_Bound"] 
o["Irregular_Overhead"].Serializing_Operation = o["Serializing_Operation"] o["Irregular_Overhead"].Core_Bound = o["Core_Bound"] o["Irregular_Overhead"].Bad_Speculation = o["Bad_Speculation"] o["Irregular_Overhead"].ITLB_Misses = o["ITLB_Misses"] o["Irregular_Overhead"].Divider = o["Divider"] o["Irregular_Overhead"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Irregular_Overhead"].Memory_Bound = o["Memory_Bound"] o["Irregular_Overhead"].Machine_Clears = o["Machine_Clears"] o["Irregular_Overhead"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Irregular_Overhead"].LCP = o["LCP"] o["Irregular_Overhead"].Other_Mispredicts = o["Other_Mispredicts"] o["Irregular_Overhead"].AMX_Busy = o["AMX_Busy"] o["Irregular_Overhead"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Irregular_Overhead"].DSB_Switches = o["DSB_Switches"] o["Irregular_Overhead"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Assists = o["Assists"] o["Irregular_Overhead"].Backend_Bound = o["Backend_Bound"] o["Irregular_Overhead"].Branch_Resteers = o["Branch_Resteers"] o["Irregular_Overhead"].Clears_Resteers = o["Clears_Resteers"] o["Irregular_Overhead"].MS_Switches = o["MS_Switches"] o["Irregular_Overhead"].Other_Nukes = o["Other_Nukes"] o["Irregular_Overhead"].Unknown_Branches = o["Unknown_Branches"] o["Irregular_Overhead"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].L1_Bound = o["L1_Bound"] o["Other_Bottlenecks"].C02_WAIT = o["C02_WAIT"] o["Other_Bottlenecks"].Retiring = o["Retiring"] o["Other_Bottlenecks"].PMM_Bound = o["PMM_Bound"] o["Other_Bottlenecks"].Data_Sharing = o["Data_Sharing"] o["Other_Bottlenecks"].L2_Bound = o["L2_Bound"] o["Other_Bottlenecks"].Core_Bound = o["Core_Bound"] o["Other_Bottlenecks"].Ports_Utilization = o["Ports_Utilization"] o["Other_Bottlenecks"].Contested_Accesses = o["Contested_Accesses"] o["Other_Bottlenecks"].Divider = o["Divider"] o["Other_Bottlenecks"].L3_Bound = o["L3_Bound"] o["Other_Bottlenecks"].Ports_Utilized_3m = 
o["Ports_Utilized_3m"] o["Other_Bottlenecks"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Other_Bottlenecks"].FB_Full = o["FB_Full"] o["Other_Bottlenecks"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Other_Bottlenecks"].Store_Latency = o["Store_Latency"] o["Other_Bottlenecks"].Other_Mispredicts = o["Other_Mispredicts"] o["Other_Bottlenecks"].DSB_Switches = o["DSB_Switches"] o["Other_Bottlenecks"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Other_Bottlenecks"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Other_Bottlenecks"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Other_Bottlenecks"].Assists = o["Assists"] o["Other_Bottlenecks"].Backend_Bound = o["Backend_Bound"] o["Other_Bottlenecks"].Branch_Resteers = o["Branch_Resteers"] o["Other_Bottlenecks"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Other_Bottlenecks"].Heavy_Operations = o["Heavy_Operations"] o["Other_Bottlenecks"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].DTLB_Load = o["DTLB_Load"] o["Other_Bottlenecks"].False_Sharing = o["False_Sharing"] o["Other_Bottlenecks"].ICache_Misses = o["ICache_Misses"] o["Other_Bottlenecks"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Other_Bottlenecks"].Frontend_Bound = o["Frontend_Bound"] o["Other_Bottlenecks"].Machine_Clears = o["Machine_Clears"] o["Other_Bottlenecks"].Streaming_Stores = o["Streaming_Stores"] o["Other_Bottlenecks"].Memory_Bound = o["Memory_Bound"] o["Other_Bottlenecks"].SQ_Full = o["SQ_Full"] o["Other_Bottlenecks"].Store_Bound = o["Store_Bound"] o["Other_Bottlenecks"].Split_Loads = o["Split_Loads"] o["Other_Bottlenecks"].Bad_Speculation = o["Bad_Speculation"] o["Other_Bottlenecks"].ITLB_Misses = o["ITLB_Misses"] o["Other_Bottlenecks"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Other_Bottlenecks"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Other_Bottlenecks"].Serializing_Operation = o["Serializing_Operation"] o["Other_Bottlenecks"].DTLB_Store = o["DTLB_Store"] o["Other_Bottlenecks"].Branch_Mispredicts = o["Branch_Mispredicts"] 
o["Other_Bottlenecks"].LCP = o["LCP"] o["Other_Bottlenecks"].Split_Stores = o["Split_Stores"] o["Other_Bottlenecks"].AMX_Busy = o["AMX_Busy"] o["Other_Bottlenecks"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Other_Bottlenecks"].Lock_Latency = o["Lock_Latency"] o["Other_Bottlenecks"].MEM_Latency = o["MEM_Latency"] o["Other_Bottlenecks"].Clears_Resteers = o["Clears_Resteers"] o["Other_Bottlenecks"].MS_Switches = o["MS_Switches"] o["Other_Bottlenecks"].Other_Nukes = o["Other_Nukes"] o["Other_Bottlenecks"].DRAM_Bound = o["DRAM_Bound"] o["Other_Bottlenecks"].Unknown_Branches = o["Unknown_Branches"] o["Other_Bottlenecks"].HBM_Bound = o["HBM_Bound"] o["Useful_Work"].Retiring = o["Retiring"] o["Useful_Work"].Heavy_Operations = o["Heavy_Operations"] o["Useful_Work"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Useful_Work"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Useful_Work"].Assists = o["Assists"] o["Core_Bound_Likely"].Memory_Bound = o["Memory_Bound"] o["Core_Bound_Likely"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Core_Bound_Likely"].Core_Bound = o["Core_Bound"] o["Core_Bound_Likely"].Ports_Utilization = o["Ports_Utilization"] o["Core_Bound_Likely"].Retiring = o["Retiring"] o["Core_Bound_Likely"].Backend_Bound = o["Backend_Bound"] o["UopPI"].Retiring = o["Retiring"] o["UpTB"].Retiring = o["Retiring"] o["Retire"].Retiring = o["Retiring"] o["DSB_Misses"].MITE = o["MITE"] o["DSB_Misses"].LCP = o["LCP"] o["DSB_Misses"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Misses"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Misses"].DSB_Switches = o["DSB_Switches"] o["DSB_Misses"].Branch_Resteers = o["Branch_Resteers"] o["DSB_Misses"].ICache_Misses = o["ICache_Misses"] o["DSB_Misses"].MS_Switches = o["MS_Switches"] o["DSB_Misses"].ITLB_Misses = o["ITLB_Misses"] o["DSB_Misses"].DSB = o["DSB"] o["DSB_Misses"].Unknown_Branches = o["Unknown_Branches"] o["DSB_Misses"].Fetch_Latency = o["Fetch_Latency"] o["DSB_Bandwidth"].Fetch_Bandwidth = 
o["Fetch_Bandwidth"] o["DSB_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Bandwidth"].DSB = o["DSB"] o["DSB_Bandwidth"].MITE = o["MITE"] o["DSB_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].LCP = o["LCP"] o["IC_Misses"].MS_Switches = o["MS_Switches"] o["IC_Misses"].ICache_Misses = o["ICache_Misses"] o["IC_Misses"].ITLB_Misses = o["ITLB_Misses"] o["IC_Misses"].Unknown_Branches = o["Unknown_Branches"] o["IC_Misses"].DSB_Switches = o["DSB_Switches"] o["IC_Misses"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].Retiring = o["Retiring"] o["Branch_Misprediction_Cost"].ICache_Misses = o["ICache_Misses"] o["Branch_Misprediction_Cost"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Branch_Misprediction_Cost"].Frontend_Bound = o["Frontend_Bound"] o["Branch_Misprediction_Cost"].Bad_Speculation = o["Bad_Speculation"] o["Branch_Misprediction_Cost"].ITLB_Misses = o["ITLB_Misses"] o["Branch_Misprediction_Cost"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Branch_Misprediction_Cost"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Branch_Misprediction_Cost"].LCP = o["LCP"] o["Branch_Misprediction_Cost"].Other_Mispredicts = o["Other_Mispredicts"] o["Branch_Misprediction_Cost"].DSB_Switches = o["DSB_Switches"] o["Branch_Misprediction_Cost"].Backend_Bound = o["Backend_Bound"] o["Branch_Misprediction_Cost"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].MS_Switches = o["MS_Switches"] o["Branch_Misprediction_Cost"].Unknown_Branches = o["Unknown_Branches"] o["Branch_Misprediction_Cost"].Fetch_Latency = o["Fetch_Latency"] # siblings cross-tree o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],) o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],) o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Serializing_Operation"], o["Mixing_Vectors"], 
o["Microcode_Sequencer"],) o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],) o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],) o["Decoder0_Alone"].sibling = (o["Few_Uops_Instructions"],) o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],) o["Machine_Clears"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"], o["Microcode_Sequencer"],) o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],) o["DTLB_Load"].sibling = (o["DTLB_Store"],) o["Lock_Latency"].sibling = (o["Store_Latency"],) o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"], o["Streaming_Stores"],) o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"],) o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Remote_Cache"], o["False_Sharing"],) o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],) o["L3_Hit_Latency"].overlap = True o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],) o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],) o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],) o["Remote_Cache"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"],) o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],) o["Store_Latency"].overlap = True o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"],) o["Streaming_Stores"].sibling = (o["FB_Full"],) o["DTLB_Store"].sibling = (o["DTLB_Load"],) o["Serializing_Operation"].sibling = (o["MS_Switches"],) o["Mixing_Vectors"].sibling = (o["MS_Switches"],) o["Ports_Utilized_1"].sibling = (o["L1_Bound"],) o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_6"], 
o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["Int_Vector_128b"], o["Int_Vector_256b"],) o["Int_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_256b"],) o["Int_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"],) o["Few_Uops_Instructions"].sibling = (o["Decoder0_Alone"],) o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], 
o["L1_Bound"],) o["Mispredictions"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["Cache_Memory_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],) o["Cache_Memory_Latency"].sibling = (o["L3_Hit_Latency"], o["MEM_Latency"],) o["Memory_Data_TLBs"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Memory_Synchronization"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Irregular_Overhead"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],) o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Misses"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["Branch_Misprediction_Cost"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
273,481
Python
.py
6,251
37.783875
1,927
0.65589
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,910
ivb_client_ratios.py
andikleen_pmu-tools/ivb_client_ratios.py
# -*- coding: latin-1 -*- # # auto generated TopDown/TMA 4.8-full-perf description for Intel 3rd gen Core (code named IvyBridge) # Please see http://ark.intel.com for more details on these CPUs. # # References: # http://bit.ly/tma-ispass14 # http://halobates.de/blog/p/262 # https://sites.google.com/site/analysismethods/yasin-pubs # https://download.01.org/perfmon/ # https://github.com/andikleen/pmu-tools/wiki/toplev-manual # # Helpers print_error = lambda msg: False smt_enabled = False ebs_mode = False version = "4.8-full-perf" base_frequency = -1.0 Memory = 0 Average_Frequency = 0.0 num_cores = 1 num_threads = 1 num_sockets = 1 def handle_error(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 obj.thresh = False def handle_error_metric(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 # Constants Exe_Ports = 6 Mem_L2_Store_Cost = 9 Mem_L3_Weight = 7 Mem_STLB_Hit_Cost = 7 BAClear_Cost = 12 MS_Switches_Cost = 3 Avg_Assist_Cost = 66 Pipeline_Width = 4 OneMillion = 1000000 OneBillion = 1000000000 Energy_Unit = 15.6 EBS_Mode = 0 DS = 0 # Aux. 
# Aux. formulas

# Cycles the backend could not accept more uops: total execution stalls plus
# the few-uops-executed cycles, minus RS-empty cycles charged to the frontend,
# plus store-buffer stalls.
def Backend_Bound_Cycles(self, EV, level):
    return (STALLS_TOTAL(self, EV, level) + EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level) - Few_Uops_Executed_Threshold(self, EV, level) - Frontend_RS_Empty_Cycles(self, EV, level) + EV("RESOURCE_STALLS.SB", level))

# Cycles with zero uops executed. Core-scope counts are halved when SMT is
# enabled — presumably to apportion them across the two threads (convention
# used throughout this generated file).
def Cycles_0_Ports_Utilized(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE:i1:c1", level)) / 2 if smt_enabled else(STALLS_TOTAL(self, EV, level) - Frontend_RS_Empty_Cycles(self, EV, level))

# Cycles with exactly one uop executed (>=1 minus >=2).
def Cycles_1_Port_Utilized(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE:c1", level) - EV("UOPS_EXECUTED.CORE:c2", level)) / 2 if smt_enabled else(EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level) - EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level))

# Cycles with exactly two uops executed (>=2 minus >=3).
def Cycles_2_Ports_Utilized(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE:c2", level) - EV("UOPS_EXECUTED.CORE:c3", level)) / 2 if smt_enabled else(EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level) - EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level))

# Cycles with three or more uops executed.
def Cycles_3m_Ports_Utilized(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE:c3", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level)

# Measurement interval length converted from milliseconds to seconds.
def DurationTimeInSeconds(self, EV, level):
    return EV("interval-ms", 0) / 1000

# Cycles in which at least one uop was executed.
def Execute_Cycles(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE:c1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level)

# Total uops delivered by all four frontend paths (DSB, LSD, MITE, MS).
def Fetched_Uops(self, EV, level):
    return (EV("IDQ.DSB_UOPS", level) + EV("LSD.UOPS", level) + EV("IDQ.MITE_UOPS", level) + EV("IDQ.MS_UOPS", level))

# Picks the "few uops executed" cutoff by IPC. The two bare EV() calls before
# the conditional return appear to reference both events unconditionally —
# presumably so the event scheduler always programs both regardless of which
# branch is taken at evaluation time (pattern recurs in this file).
def Few_Uops_Executed_Threshold(self, EV, level):
    EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level)
    EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level)
    return EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level) if (IPC(self, EV, level)> 1.8) else EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level)

# Floating Point computational (arithmetic) Operations Count
# Weighted by SIMD width: scalar x1, 128b packed-double x2, 128b packed-single
# and 256b packed-double x4, 256b packed-single x8.
def FLOP_Count(self, EV, level):
    return (1 *(EV("FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE", level) + EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", level)) + 2 * EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", level) + 4 *(EV("FP_COMP_OPS_EXE.SSE_PACKED_SINGLE", level) + EV("SIMD_FP_256.PACKED_DOUBLE", level)) + 8 * EV("SIMD_FP_256.PACKED_SINGLE", level))

# Scalar FP arithmetic instruction count (single + double).
def FP_Arith_Scalar(self, EV, level):
    return EV("FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE", level) + EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", level)

# Packed (vector) FP arithmetic instruction count, 128b and 256b.
def FP_Arith_Vector(self, EV, level):
    return EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", level) + EV("FP_COMP_OPS_EXE.SSE_PACKED_SINGLE", level) + EV("SIMD_FP_256.PACKED_SINGLE", level) + EV("SIMD_FP_256.PACKED_DOUBLE", level)

# RS-empty cycles, but only attributed when Fetch_Latency is significant
# (> 0.1); otherwise 0. Leading bare EV() keeps the event scheduled.
def Frontend_RS_Empty_Cycles(self, EV, level):
    EV("RS_EVENTS.EMPTY_CYCLES", level)
    return EV("RS_EVENTS.EMPTY_CYCLES", level) if (self.Fetch_Latency.compute(EV)> 0.1) else 0

# Frontend-starved cycles, capped at total thread clocks via a min() lambda
# evaluated inside EV.
def Frontend_Latency_Cycles(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", level)) , level )

# IPC normalized by the machine width (1.0 == fully-issued pipeline).
def HighIPC(self, EV, level):
    val = IPC(self, EV, level) / Pipeline_Width
    return val

# Cycles spent on ITLB misses: fixed 12-cycle cost per STLB hit plus measured
# page-walk duration.
def ITLB_Miss_Cycles(self, EV, level):
    return (12 * EV("ITLB_MISSES.STLB_HIT", level) + EV("ITLB_MISSES.WALK_DURATION", level))

# Retired load uops that missed L1 but hit somewhere on-chip.
def LOAD_L1_MISS(self, EV, level):
    return EV("MEM_LOAD_UOPS_RETIRED.L2_HIT", level) + EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level) + EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT", level) + EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM", level) + EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS", level)

# All retired L1-miss loads, including those that also missed LLC.
def LOAD_L1_MISS_NET(self, EV, level):
    return LOAD_L1_MISS(self, EV, level) + EV("MEM_LOAD_UOPS_RETIRED.LLC_MISS", level)

# LLC hits, scaled up to account for loads that merged into a line-fill
# buffer (HIT_LFB) and so were not tagged with a data source.
def LOAD_L3_HIT(self, EV, level):
    return EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))

# Cross-snoop clean-hit loads, with the same HIT_LFB scaling as LOAD_L3_HIT.
def LOAD_XSNP_HIT(self, EV, level):
    return EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))

# Cross-snoop modified-hit (HITM) loads, HIT_LFB-scaled.
def LOAD_XSNP_HITM(self, EV, level):
    return EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))

# Cross-snoop miss loads, HIT_LFB-scaled.
def LOAD_XSNP_MISS(self, EV, level):
    return EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))

# Fraction of L3 accesses that hit; misses are weighted by Mem_L3_Weight to
# reflect their higher cost.
def Mem_L3_Hit_Fraction(self, EV, level):
    return EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level) / (EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level) + Mem_L3_Weight * EV("MEM_LOAD_UOPS_RETIRED.LLC_MISS", level))

# Fraction of stores that are locked (atomic) operations.
def Mem_Lock_St_Fraction(self, EV, level):
    return EV("MEM_UOPS_RETIRED.LOCK_LOADS", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level)

# Share of backend-bound cycles attributable to memory (load + store-buffer
# stalls).
def Memory_Bound_Fraction(self, EV, level):
    return (STALLS_MEM_ANY(self, EV, level) + EV("RESOURCE_STALLS.SB", level)) / Backend_Bound_Cycles(self, EV, level)

# Fraction of pipeline clears caused by branch mispredictions (vs machine
# clears).
def Mispred_Clears_Fraction(self, EV, level):
    return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))

# Cycles with an outstanding demand RFO, capped at thread clocks.
def ORO_Demand_RFO_C1(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level )

# Cycles with any outstanding demand data read, capped at thread clocks.
def ORO_DRD_Any_Cycles(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level )

# Cycles with 6+ outstanding data reads (bandwidth-limited), capped at clocks.
def ORO_DRD_BW_Cycles(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c6", level)) , level )

# Cycles the super queue was full; core-scope count halved under SMT.
def SQ_Full_Cycles(self, EV, level):
    return (EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) / 2) if smt_enabled else EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level)

# Execution stalls with a load pending, capped at thread clocks.
def STALLS_MEM_ANY(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("CYCLE_ACTIVITY.STALLS_LDM_PENDING", level)) , level )

# Total no-execute stall cycles, capped at thread clocks.
def STALLS_TOTAL(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("CYCLE_ACTIVITY.CYCLES_NO_EXECUTE", level)) , level )

# Estimated cycles for store RFOs hitting L2, excluding locked stores.
def Store_L2_Hit_Cycles(self, EV, level):
    return EV("L2_RQSTS.RFO_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level))

# Fixed model costs (cycles) for cross-snoop outcomes on this CPU.
def Mem_XSNP_HitM_Cost(self, EV, level):
    return 60

def Mem_XSNP_Hit_Cost(self, EV, level):
    return 43

def Mem_XSNP_None_Cost(self, EV, level):
    return 29

# Misprediction-recovery cycles; the _ANY core-scope variant is halved when
# SMT is enabled.
def Recovery_Cycles(self, EV, level):
    return (EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2) if smt_enabled else EV("INT_MISC.RECOVERY_CYCLES", level)

# Fraction of issued uops that eventually retired.
def Retire_Fraction(self, EV, level):
    return Retired_Slots(self, EV, level) / EV("UOPS_ISSUED.ANY", level)

# Retired slots per Logical Processor
def Retired_Slots(self, EV, level):
    return EV("UOPS_RETIRED.RETIRE_SLOTS", level)

# Number of logical processors (enabled or online) on the target system
# NOTE(review): hard-coded 4-core assumption emitted by the generator.
def Num_CPUs(self, EV, level):
    return 8 if smt_enabled else 4

# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)

# Uops Per Instruction
def UopPI(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
    self.thresh = (val > 1.05)
    return val

# Uops per taken branch
def UpTB(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 1.5
    return val

# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
    return 1 / IPC(self, EV, level)

# Per-Logical Processor actual clocks when the Logical Processor is active.
def CLKS(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD", level)

# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
    return Pipeline_Width * CORE_CLKS(self, EV, level)

# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level): return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_ISSUED.ANY", level) # Instructions Per Cycle across hyper-threads (per physical core) def CoreIPC(self, EV, level): return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level) # Floating Point Operations Per Cycle def FLOPc(self, EV, level): return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level) # Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor) def ILP(self, EV, level): return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_EXECUTED.THREAD:c1", level) # Core actual clocks when any Logical Processor is active on the Physical Core def CORE_CLKS(self, EV, level): return ((EV("CPU_CLK_UNHALTED.THREAD", level) / 2) * (1 + EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_XCLK", level))) if ebs_mode else(EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2) if smt_enabled else CLKS(self, EV, level) # Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills def IpLoad(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level) self.thresh = (val < 3) return val # Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses. 
#Link Opt Guide section: Minimize Register Spills def IpStore(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level) self.thresh = (val < 8) return val # Instructions per Branch (lower number means higher occurrence rate) def IpBranch(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level) self.thresh = (val < 8) return val # Instructions per (near) call (lower number means higher occurrence rate) def IpCall(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level) self.thresh = (val < 200) return val # Instructions per taken branch def IpTB(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level) self.thresh = val < Pipeline_Width * 2 + 1 return val # Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes. def BpTkBranch(self, EV, level): return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level) # Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW. def IpArith(self, EV, level): val = 1 /(self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)) self.thresh = (val < 10) return val # Total number of retired Instructions def Instructions(self, EV, level): return EV("INST_RETIRED.ANY", level) # Average number of Uops retired in cycles where at least one uop has retired. def Retire(self, EV, level): return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level) def Execute(self, EV, level): return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level) # Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. 
# http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    # Only flag low DSB coverage when the workload also has high IPC.
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val

# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
    return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)

# Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 200)
    return val

# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    # Executed-mispredict count is scaled by the retire fraction to
    # approximate retired indirect mispredicts.
    val = Instructions(self, EV, level) / (Retire_Fraction(self, EV, level) * EV("BR_MISP_EXEC.INDIRECT", level))
    self.thresh = (val < 1000)
    return val

# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / (EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level) + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level))

# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)

# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("OFFCORE_REQUESTS.DEMAND_RFO", level) / EV("INST_RETIRED.ANY", level)

# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_UOPS_RETIRED.LLC_MISS", level) / EV("INST_RETIRED.ANY", level)

# L1D fill bandwidth in GB/sec (64 bytes per replaced cache line).
def L1D_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)

# L2 fill bandwidth in GB/sec.
def L2_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)

# L3 fill bandwidth in GB/sec.
def L3_Cache_Fill_BW(self, EV, level):
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)

# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    val = (EV("ITLB_MISSES.WALK_DURATION", level) + EV("DTLB_LOAD_MISSES.WALK_DURATION", level) + EV("DTLB_STORE_MISSES.WALK_DURATION", level)) / CORE_CLKS(self, EV, level)
    self.thresh = (val > 0.5)
    return val

# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    return L1D_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    return L2_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    return L3_Cache_Fill_BW(self, EV, level)

# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)

# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)

# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)

# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)

# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)

# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)

# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / (EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2) if smt_enabled else 0

# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val

# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)

# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    return 64 *(EV("UNC_ARB_TRK_REQUESTS.ALL", level) + EV("UNC_ARB_COH_TRK_REQUESTS.ALL", level)) / OneMillion / Time(self, EV, level) / 1000

# Total package Power in Watts
def Power(self, EV, level):
    return EV("UNC_PKG_ENERGY_STATUS", level) * Energy_Unit /(Time(self, EV, level) * OneMillion )

# Run duration time in seconds
def Time(self, EV, level):
    val = EV("interval-s", 0)
    # Very short measurement intervals make the derived metrics noisy.
    self.thresh = (val < 1)
    return val

# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    return EV("UNC_CLOCK.SOCKET", level)

# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = (val < 1000000)
    return val

# Event groups

# TMA level-1 node (area FE): slots where the Frontend undersupplied the Backend.
class Frontend_Bound:
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    def compute(self, EV):
        try:
            # Issue slots with no uops delivered by the Frontend, out of all slots.
            self.val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."""

# TMA level-2 node: Frontend latency (periods where no uops are delivered at all).
class Fetch_Latency:
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['RS_EVENTS.EMPTY_END']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Pipeline_Width * Frontend_Latency_Cycles(self, EV, 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction- cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period."""

# TMA level-3 node: instruction-cache miss stall cycles (ITLB part subtracted out).
class ICache_Misses:
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ICACHE.IFETCH_STALL", 3) / CLKS(self, EV, 3) - self.ITLB_Misses.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.. Using compiler's Profile-Guided Optimization (PGO) can reduce i-cache misses through improved hot code layout."""

# TMA level-3 node: instruction TLB miss stall cycles.
class ITLB_Misses:
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['ITLB_MISSES.WALK_COMPLETED']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ITLB_Miss_Cycles(self, EV, 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.. Consider large 2M pages for code (selectively prefer hot large-size function, due to limited 2M entries). Linux options: standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public ations/optimizing-function-placement-for-large-scale-data- center-applications-2/"""

# TMA level-3 node: fetch delay after mispredicts/clears/BAClears (estimated
# via the fixed BAClear_Cost penalty).
class Branch_Resteers:
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = BAClear_Cost *(EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) + EV("MACHINE_CLEARS.COUNT", 3) + EV("BACLEARS.ANY", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings."""

# TMA level-3 node: penalty of switching uop delivery to the Microcode Sequencer.
class MS_Switches:
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals."""

# TMA level-3 node: Length-Changing-Prefix decode stalls.
class LCP:
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ILD_STALL.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this."""

# TMA level-3 node: DSB-to-MITE pipeline switch penalty cycles.
class DSB_Switches:
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.. See section 'Optimization for Decoded Icache' in Optimization Manual:. http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html"""

# TMA level-2 node: Frontend bandwidth = Frontend_Bound minus Fetch_Latency.
class Fetch_Bandwidth:
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend."""

# TMA level-3 node: cycles limited by the legacy (MITE) decode pipeline.
class MITE:
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            # Cycles MITE delivered some but not the full 4 uops; per-core, so /2.
            self.val = (EV("IDQ.ALL_MITE_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_MITE_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.. Consider tuning codegen of 'small hotspots' that can fit in DSB. Read about 'Decoded ICache' in Optimization Manual:.
http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html"""

# TMA level-3 node: cycles limited by the DSB (decoded uop cache) fetch pipeline.
class DSB:
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            # Cycles DSB delivered some but not the full 4 uops; per-core, so /2.
            self.val = (EV("IDQ.ALL_DSB_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_DSB_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here."""

# TMA level-1 node: slots wasted by incorrect speculation (issued-but-not-retired
# uops plus recovery bubbles).
class Bad_Speculation:
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """
This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss- predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."""

# TMA level-2 node: the branch-misprediction share of Bad_Speculation.
class Branch_Mispredicts:
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.. Using profile feedback in the compiler may help. Please see the Optimization Manual for general strategies for addressing branch misprediction issues.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html"""

# TMA level-2 node: the machine-clear remainder of Bad_Speculation.
class Machine_Clears:
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.. See \"Memory Disambiguation\" in Optimization Manual and:. https://software.intel.com/sites/default/files/ m/d/4/1/d/8/sma.pdf"""

# TMA level-1 node: slots stalled for lack of Backend resources
# (computed as the remainder of the other three level-1 nodes).
class Backend_Bound:
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 1 -(self.Frontend_Bound.compute(EV) + self.Bad_Speculation.compute(EV) + self.Retiring.compute(EV))
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."""

# TMA level-2 node: the memory-subsystem share of Backend_Bound.
class Memory_Bound:
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)."""

# TMA level-3 node: stalls without loads missing the L1 data cache.
class L1_Bound:
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L1_HIT:pp', 'MEM_LOAD_UOPS_RETIRED.HIT_LFB:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            # Memory stalls not explained by L1D-miss-pending stalls; clamp at 0.
            self.val = max((STALLS_MEM_ANY(self, EV, 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_PENDING", 3)) / CLKS(self, EV, 3) , 0 )
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache."""

# TMA level-4 node: estimated cycles lost to DTLB misses on loads.
class DTLB_Load:
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT", 4) + EV("DTLB_LOAD_MISSES.WALK_DURATION", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss.."""

# TMA level-4 node: loads blocked on store forwarding (13-cycle penalty model).
class Store_Fwd_Blk:
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores.
To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading."""

# TMA level-4 node: cycles handling cache misses due to locked operations.
class Lock_Latency:
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.LOCK_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_Lock_St_Fraction(self, EV, 4) * ORO_Demand_RFO_C1(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them."""

# TMA level-4 node: loads split across a cache-line boundary (13-cycle model).
class Split_Loads:
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. . Consider aligning data or hot structure fields. See the Optimization Manual for more details"""

# TMA level-4 node: load re-issues due to 4K address aliasing with stores.
# Class name is prefixed with 'G' because Python identifiers cannot start
# with a digit; the reported metric name stays "4K_Aliasing".
class G4K_Aliasing:
    name = "4K_Aliasing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "G4K_Aliasing zero division")
        return self.val
    desc = """
This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).. Consider reducing independent loads/stores accesses with 4K offsets. See the Optimization Manual for more details"""

# TMA level-4 node: L1D Fill Buffer full limiting further miss requests.
class FB_Full:
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("L1D_PEND_MISS.FB_FULL:c1", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val
    desc = """
This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory).. See $issueBW and $issueSL hints. Avoid software prefetches if indeed memory BW limited."""

# TMA level-3 node: stalls due to load accesses served by the L2 cache.
class L2_Bound:
    name = "L2_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L2_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            # L1D-miss-pending stalls not covered by L2-miss-pending stalls.
            self.val = (EV("CYCLE_ACTIVITY.STALLS_L1D_PENDING", 3) - EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L2_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance."""

# TMA level-3 node: stalls due to load accesses served by the L3 cache.
class L3_Bound:
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.LLC_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mem_L3_Hit_Fraction(self, EV, 3) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance."""

# TMA level-4 node: synchronization cost of contested (cross-core) accesses.
class Contested_Accesses:
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_XSNP_HitM_Cost(self, EV, 4) * LOAD_XSNP_HITM(self, EV, 4) + Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_MISS(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing."""

# TMA level-4 node: synchronization cost of (read) data-sharing accesses.
class Data_Sharing:
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance."""

# TMA level-4 node: unloaded-latency cost of demand loads hitting L3.
class L3_Hit_Latency:
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.LLC_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_None_Cost(self, EV, 4) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings."""

# TMA level-4 node: Super Queue full cycles (per physical core).
class SQ_Full:
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = SQ_Full_Cycles(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """
This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)."""

# TMA level-3 node: stalls due to loads missing the L3 (served by DRAM).
class DRAM_Bound:
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.LLC_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (1 - Mem_L3_Hit_Fraction(self, EV, 3)) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.1) and
self.parent.thresh except ZeroDivisionError: handle_error(self, "DRAM_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance.""" class MEM_Bandwidth: name = "MEM_Bandwidth" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore']) maxval = None def compute(self, EV): try: self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MEM_Bandwidth zero division") return self.val desc = """ This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that).. Improve data accesses to reduce cacheline transfers from/to memory. Examples: 1) Consume all bytes of a each cacheline before it is evicted (e.g. reorder structure elements and split non-hot ones), 2) merge computed-limited with BW-limited loops, 3) NUMA optimizations in multi-socket system. 
Note: software prefetches will not help BW-limited application..""" class MEM_Latency: name = "MEM_Latency" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore']) maxval = None def compute(self, EV): try: self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MEM_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that).. Improve data accesses or interleave them with compute. Examples: 1) Data layout re-structuring, 2) Software Prefetches (also through the compiler)..""" class Store_Bound: name = "Store_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_UOPS_RETIRED.ALL_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset(['MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = EV("RESOURCE_STALLS.SB", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Bound zero division") return self.val desc = """ This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. 
This metric will be flagged should RFO stores be a bottleneck.""" class Store_Latency: name = "Store_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore']) maxval = 1.0 def compute(self, EV): try: self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Consider to avoid/reduce unnecessary (or easily load-able/computable) memory store.""" class False_Sharing: name = "False_Sharing" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM:pp', 'OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "False_Sharing zero division") return self.val desc = """ This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. . 
False Sharing can be easily avoided by padding to make Logical Processors access different lines.""" class Split_Stores: name = "Split_Stores" domain = "Core_Utilization" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_UOPS_RETIRED.SPLIT_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = 2 * EV("MEM_UOPS_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Split_Stores zero division") return self.val desc = """ This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity.""" class DTLB_Store: name = "DTLB_Store" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_UOPS_RETIRED.STLB_MISS_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMT', 'MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT", 4) + EV("DTLB_STORE_MISSES.WALK_DURATION", 4)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DTLB_Store zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. 
Try using larger page sizes for large amounts of frequently- used data.""" class Core_Bound: name = "Core_Bound" domain = "Slots" area = "BE/Core" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Backend', 'TmaL2', 'Compute']) maxval = None def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Core_Bound zero division") return self.val desc = """ This metric represents fraction of slots where Core non- memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).. Tip: consider Port Saturation analysis as next step.""" class Divider: name = "Divider" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = ['ARITH.FPU_DIV_ACTIVE'] errcount = 0 sibling = None metricgroup = frozenset(['BvCB']) maxval = 1.0 def compute(self, EV): try: self.val = EV("ARITH.FPU_DIV_ACTIVE", 3) / CORE_CLKS(self, EV, 3) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Divider zero division") return self.val desc = """ This metric represents fraction of cycles where the Divider unit was active. 
Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.""" class Ports_Utilization: name = "Ports_Utilization" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = (Backend_Bound_Cycles(self, EV, 3) - EV("RESOURCE_STALLS.SB", 3) - STALLS_MEM_ANY(self, EV, 3)) / CLKS(self, EV, 3) self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilization zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.. Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop.""" class Ports_Utilized_0: name = "Ports_Utilized_0" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_0_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_0 zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). 
Long-latency instructions like divides may contribute to this metric.. Check assembly view and Appendix C in Optimization Manual to find out instructions with say 5 or more cycles latency.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Ports_Utilized_1: name = "Ports_Utilized_1" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_1_Port_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_1 zero division") return self.val desc = """ This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. 
walking a linked list) - looking at the assembly can be helpful.""" class Ports_Utilized_2: name = "Ports_Utilized_2" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_2_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_2 zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto- Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop.""" class Ports_Utilized_3m: name = "Ports_Utilized_3m" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvCB', 'PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_3m_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.4) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_3m zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).""" class ALU_Op_Utilization: name = "ALU_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("UOPS_DISPATCHED_PORT.PORT_0", 5) + EV("UOPS_DISPATCHED_PORT.PORT_1", 5) + EV("UOPS_DISPATCHED_PORT.PORT_5", 5)) / (3 * CORE_CLKS(self, EV, 5)) self.thresh = (self.val > 0.4) except ZeroDivisionError: 
handle_error(self, "ALU_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.""" class Port_0: name = "Port_0" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_0'] errcount = 0 sibling = None metricgroup = frozenset(['Compute']) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_0", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_0 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ALU""" class Port_1: name = "Port_1" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_1'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_1", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_1 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)""" class Port_5: name = "Port_5" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_5'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_5", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_5 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 Branches and ALU. See section 'Handling Port 5 Pressure' in Optimization Manual:. 
http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Load_Op_Utilization: name = "Load_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("UOPS_DISPATCHED_PORT.PORT_2", 5) + EV("UOPS_DISPATCHED_PORT.PORT_3", 5) - EV("UOPS_DISPATCHED_PORT.PORT_4", 5)) / (2 * CORE_CLKS(self, EV, 5)) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Load_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations""" class Port_2: name = "Port_2" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_2'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_2", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_2 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 Loads and Store-address""" class Port_3: name = "Port_3" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_3'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_3", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_3 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 Loads and Store-address""" class Store_Op_Utilization: name = "Store_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount 
= 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 5) / CORE_CLKS(self, EV, 5) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Store_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations""" class Port_4: name = "Port_4" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_4'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_4 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)""" class Retiring: name = "Retiring" domain = "Slots" area = "RET" level = 1 htoff = False sample = ['UOPS_RETIRED.RETIRE_SLOTS'] errcount = 0 sibling = None metricgroup = frozenset(['BvUW', 'TmaL1']) maxval = None def compute(self, EV): try: self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh except ZeroDivisionError: handle_error(self, "Retiring zero division") return self.val desc = """ This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. 
They often indicate suboptimal performance and can often be optimized or avoided. . A high Retiring value for non-vectorized code may be a good hint for programmer to consider vectorizing his code. Doing so essentially lets more computations be done without significantly increasing number of instructions thus improving the performance.""" class Light_Operations: name = "Light_Operations" domain = "Slots" area = "RET" level = 2 htoff = False sample = ['INST_RETIRED.PREC_DIST'] errcount = 0 sibling = None metricgroup = frozenset(['Retire', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Light_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. . 
Focus on techniques that reduce instruction count or result in more efficient instructions generation such as vectorization.""" class FP_Arith: name = "FP_Arith" domain = "Uops" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['HPC']) maxval = None def compute(self, EV): try: self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Arith zero division") return self.val desc = """ This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.""" class X87_Use: name = "X87_Use" domain = "Uops" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute']) maxval = None def compute(self, EV): try: self.val = Retired_Slots(self, EV, 4) * EV("FP_COMP_OPS_EXE.X87", 4) / EV("UOPS_EXECUTED.THREAD", 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "X87_Use zero division") return self.val desc = """ This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.. 
Tip: consider compiler flags to generate newer AVX (or SSE) instruction sets; which typically perform better and feature vectors.""" class FP_Scalar: name = "FP_Scalar" domain = "Uops" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = None def compute(self, EV): try: self.val = FP_Arith_Scalar(self, EV, 4) / EV("UOPS_EXECUTED.THREAD", 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Scalar zero division") return self.val desc = """ This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting.. Investigate what limits (compiler) generation of vector code.""" class FP_Vector: name = "FP_Vector" domain = "Uops" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = FP_Arith_Vector(self, EV, 4) / EV("UOPS_EXECUTED.THREAD", 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector zero division") return self.val desc = """ This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting.. 
Check if vector width is expected""" class FP_Vector_128b: name = "FP_Vector_128b" domain = "Uops" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = (EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", 5) + EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", 5)) / EV("UOPS_EXECUTED.THREAD", 5) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector_128b zero division") return self.val desc = """ This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting.. Try to exploit wider vector length""" class FP_Vector_256b: name = "FP_Vector_256b" domain = "Uops" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Compute', 'Flops']) maxval = 1.0 def compute(self, EV): try: self.val = (EV("SIMD_FP_256.PACKED_DOUBLE", 5) + EV("SIMD_FP_256.PACKED_SINGLE", 5)) / EV("UOPS_EXECUTED.THREAD", 5) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "FP_Vector_256b zero division") return self.val desc = """ This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting.. 
Try to exploit wider vector length""" class Heavy_Operations: name = "Heavy_Operations" domain = "Slots" area = "RET" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Retire', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = self.Microcode_Sequencer.compute(EV) self.thresh = (self.val > 0.1) except ZeroDivisionError: handle_error(self, "Heavy_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.""" class Microcode_Sequencer: name = "Microcode_Sequencer" domain = "Slots" area = "RET" level = 3 htoff = False sample = ['IDQ.MS_UOPS'] errcount = 0 sibling = None metricgroup = frozenset(['MicroSeq']) maxval = None def compute(self, EV): try: self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Microcode_Sequencer zero division") return self.val desc = """ This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). 
These cases can often be avoided..""" class Assists: name = "Assists" domain = "Slots_Estimated" area = "RET" level = 4 htoff = False sample = ['OTHER_ASSISTS.ANY_WB_ASSIST'] errcount = 0 sibling = None metricgroup = frozenset(['BvIO']) maxval = 1.0 def compute(self, EV): try: self.val = Avg_Assist_Cost * EV("OTHER_ASSISTS.ANY_WB_ASSIST", 4) / SLOTS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Assists zero division") return self.val desc = """ This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases.""" class CISC: name = "CISC" domain = "Slots" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV)) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "CISC zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. 
Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.""" class Metric_IPC: name = "IPC" domain = "Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Ret', 'Summary']) sibling = None def compute(self, EV): try: self.val = IPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IPC zero division") desc = """ Instructions Per Cycle (per Logical Processor)""" class Metric_UopPI: name = "UopPI" domain = "Metric" maxval = 2.0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = UopPI(self, EV, 0) self.thresh = (self.val > 1.05) except ZeroDivisionError: handle_error_metric(self, "UopPI zero division") desc = """ Uops Per Instruction""" class Metric_UpTB: name = "UpTB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = UpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 1.5 except ZeroDivisionError: handle_error_metric(self, "UpTB zero division") desc = """ Uops per taken branch""" class Metric_CPI: name = "CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Mem']) sibling = None def compute(self, EV): try: self.val = CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPI zero division") desc = """ Cycles Per Instruction (per Logical Processor)""" class Metric_CLKS: name = "CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline']) sibling = None def compute(self, EV): try: self.val = CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS zero division") desc = """ Per-Logical Processor actual clocks when the Logical Processor is active.""" class 
Metric_SLOTS: name = "SLOTS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['TmaL1']) sibling = None def compute(self, EV): try: self.val = SLOTS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SLOTS zero division") desc = """ Total issue-pipeline slots (per-Physical Core till ICL; per- Logical Processor ICL onward)""" class Metric_Execute_per_Issue: name = "Execute_per_Issue" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Cor', 'Pipeline']) sibling = None def compute(self, EV): try: self.val = Execute_per_Issue(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Execute_per_Issue zero division") desc = """ The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.""" class Metric_CoreIPC: name = "CoreIPC" domain = "Core_Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Ret', 'SMT', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = CoreIPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CoreIPC zero division") desc = """ Instructions Per Cycle across hyper-threads (per physical core)""" class Metric_FLOPc: name = "FLOPc" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Ret', 'Flops']) sibling = None def compute(self, EV): try: self.val = FLOPc(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FLOPc zero division") desc = """ Floating Point Operations Per Cycle""" class Metric_ILP: name = "ILP" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Core" metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil']) sibling = None def compute(self, EV): try: self.val = ILP(self, EV, 0) self.thresh = True except ZeroDivisionError: 
handle_error_metric(self, "ILP zero division") desc = """ Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical- processor)""" class Metric_CORE_CLKS: name = "CORE_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = CORE_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CORE_CLKS zero division") desc = """ Core actual clocks when any Logical Processor is active on the Physical Core""" class Metric_IpLoad: name = "IpLoad" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpLoad(self, EV, 0) self.thresh = (self.val < 3) except ZeroDivisionError: handle_error_metric(self, "IpLoad zero division") desc = """ Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses.""" class Metric_IpStore: name = "IpStore" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpStore(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpStore zero division") desc = """ Instructions per Store (lower number means higher occurrence rate). 
Tip: reduce memory accesses.""" class Metric_IpBranch: name = "IpBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpBranch(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpBranch zero division") desc = """ Instructions per Branch (lower number means higher occurrence rate)""" class Metric_IpCall: name = "IpCall" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpCall(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpCall zero division") desc = """ Instructions per (near) call (lower number means higher occurrence rate)""" class Metric_IpTB: name = "IpTB" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 2 + 1 except ZeroDivisionError: handle_error_metric(self, "IpTB zero division") desc = """ Instructions per taken branch""" class Metric_BpTkBranch: name = "BpTkBranch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = BpTkBranch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "BpTkBranch zero division") desc = """ Branch instructions per taken branch. . 
Can be used to approximate PGO-likelihood for non-loopy codes.""" class Metric_IpArith: name = "IpArith" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith zero division") desc = """ Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.""" class Metric_Instructions: name = "Instructions" domain = "Count" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Summary', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = Instructions(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Instructions zero division") desc = """ Total number of retired Instructions""" class Metric_Retire: name = "Retire" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Pipeline', 'Ret']) sibling = None def compute(self, EV): try: self.val = Retire(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Retire zero division") desc = """ Average number of Uops retired in cycles where at least one uop has retired.""" class Metric_Execute: name = "Execute" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT']) sibling = None def compute(self, EV): try: self.val = Execute(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Execute zero division") desc = """ """ class Metric_DSB_Coverage: name = "DSB_Coverage" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSB', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Coverage(self, 
EV, 0) self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1) except ZeroDivisionError: handle_error_metric(self, "DSB_Coverage zero division") desc = """ Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture- and-technology/64-ia-32-architectures-optimization- manual.html""" class Metric_IpUnknown_Branch: name = "IpUnknown_Branch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed']) sibling = None def compute(self, EV): try: self.val = IpUnknown_Branch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpUnknown_Branch zero division") desc = """ Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)""" class Metric_IpMispredict: name = "IpMispredict" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)""" class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = (self.val < 1000) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc = """ Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).""" class Metric_Load_Miss_Real_Latency: name = "Load_Miss_Real_Latency" domain = "Clocks_Latency" maxval = 1000 
errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat']) sibling = None def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Miss_Real_Latency zero division") desc = """ Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)""" class Metric_MLP: name = "MLP" domain = "Metric" maxval = 10.0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MLP zero division") desc = """ Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)""" class Metric_L1MPKI: name = "L1MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI zero division") desc = """ L1 cache true misses per kilo instruction for retired demand loads""" class Metric_L2MPKI: name = "L2MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'Backend', 'CacheHits']) sibling = None def compute(self, EV): try: self.val = L2MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI zero division") desc = """ L2 cache true misses per kilo instruction for retired demand loads""" class Metric_L2MPKI_RFO: name = "L2MPKI_RFO" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheMisses', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_RFO(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_RFO zero division") desc 
= """ Offcore requests (L2 cache miss) per kilo instruction for demand RFOs""" class Metric_L3MPKI: name = "L3MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = L3MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3MPKI zero division") desc = """ L3 cache true misses per kilo instruction for retired demand loads""" class Metric_L1D_Cache_Fill_BW: name = "L1D_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW zero division") desc = """ """ class Metric_L2_Cache_Fill_BW: name = "L2_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Fill_BW: name = "L3_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW zero division") desc = """ """ class Metric_Page_Walks_Utilization: name = "Page_Walks_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Page_Walks_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Page_Walks_Utilization zero division") desc = """ Utilization 
of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses""" class Metric_L1D_Cache_Fill_BW_2T: name = "L1D_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L1 data cache [GB / sec]""" class Metric_L2_Cache_Fill_BW_2T: name = "L2_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L2 cache [GB / sec]""" class Metric_L3_Cache_Fill_BW_2T: name = "L3_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L3 cache [GB / sec]""" class Metric_Load_L2_Miss_Latency: name = "Load_L2_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_Miss_Latency zero division") desc = """ Average Latency for L2 cache miss demand Loads""" class Metric_Load_L2_MLP: name = "Load_L2_MLP" domain = "Metric" maxval = 
100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_MLP zero division") desc = """ Average Parallel L2 cache miss demand Loads""" class Metric_Data_L2_MLP: name = "Data_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Data_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Data_L2_MLP zero division") desc = """ Average Parallel L2 cache miss data reads""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "Metric" maxval = 1 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'Summary']) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization (percentage)""" class Metric_CPUs_Utilized: name = "CPUs_Utilized" domain = "Metric" maxval = 300 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = CPUs_Utilized(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPUs_Utilized zero division") desc = """ Average number of utilized CPUs""" class Metric_Core_Frequency: name = "Core_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary', 'Power']) sibling = None def compute(self, EV): try: self.val = Core_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Core_Frequency zero division") desc = """ Measured Average Core Frequency for unhalted processors [GHz]""" class Metric_GFLOPs: name = "GFLOPs" domain = 
"Metric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Cor', 'Flops', 'HPC']) sibling = None def compute(self, EV): try: self.val = GFLOPs(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "GFLOPs zero division") desc = """ Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_SMT_2T_Utilization: name = "SMT_2T_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = SMT_2T_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SMT_2T_Utilization zero division") desc = """ Fraction of cycles where both hardware Logical Processors were active""" class Metric_Kernel_Utilization: name = "Kernel_Utilization" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in the Operating System (OS) Kernel mode""" class Metric_Kernel_CPI: name = "Kernel_CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_CPI(self, EV, 0) self.thresh = True except 
ZeroDivisionError: handle_error_metric(self, "Kernel_CPI zero division") desc = """ Cycles Per Instruction for the Operating System (OS) Kernel mode""" class Metric_DRAM_BW_Use: name = "DRAM_BW_Use" domain = "GB/sec" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = DRAM_BW_Use(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DRAM_BW_Use zero division") desc = """ Average external Memory Bandwidth Use for reads and writes [GB / sec]""" class Metric_Power: name = "Power" domain = "SystemMetric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power', 'SoC']) sibling = None def compute(self, EV): try: self.val = Power(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Power zero division") desc = """ Total package Power in Watts""" class Metric_Time: name = "Time" domain = "Seconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = Time(self, EV, 0) self.thresh = (self.val < 1) except ZeroDivisionError: handle_error_metric(self, "Time zero division") desc = """ Run duration time in seconds""" class Metric_Socket_CLKS: name = "Socket_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Socket_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Socket_CLKS zero division") desc = """ Socket actual clocks when any core is active on that socket""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Branches', 'OS']) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) self.thresh = (self.val < 1000000) except ZeroDivisionError: 
handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]""" # Schedule class Setup: def __init__(self, r): o = dict() n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n n = LCP() ; r.run(n) ; o["LCP"] = n n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n n = MITE() ; r.run(n) ; o["MITE"] = n n = DSB() ; r.run(n) ; o["DSB"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n n = FB_Full() ; r.run(n) ; o["FB_Full"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n n = 
Store_Bound() ; r.run(n) ; o["Store_Bound"] = n n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Divider() ; r.run(n) ; o["Divider"] = n n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n n = Port_0() ; r.run(n) ; o["Port_0"] = n n = Port_1() ; r.run(n) ; o["Port_1"] = n n = Port_5() ; r.run(n) ; o["Port_5"] = n n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n n = Port_2() ; r.run(n) ; o["Port_2"] = n n = Port_3() ; r.run(n) ; o["Port_3"] = n n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n n = Port_4() ; r.run(n) ; o["Port_4"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n n = X87_Use() ; r.run(n) ; o["X87_Use"] = n n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n n = Assists() ; r.run(n) ; o["Assists"] = n n = CISC() ; r.run(n) ; o["CISC"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Resteers"].parent = o["Fetch_Latency"] o["MS_Switches"].parent = o["Fetch_Latency"] 
o["LCP"].parent = o["Fetch_Latency"] o["DSB_Switches"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["MITE"].parent = o["Fetch_Bandwidth"] o["DSB"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Memory_Bound"].parent = o["Backend_Bound"] o["L1_Bound"].parent = o["Memory_Bound"] o["DTLB_Load"].parent = o["L1_Bound"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["Lock_Latency"].parent = o["L1_Bound"] o["Split_Loads"].parent = o["L1_Bound"] o["G4K_Aliasing"].parent = o["L1_Bound"] o["FB_Full"].parent = o["L1_Bound"] o["L2_Bound"].parent = o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["Contested_Accesses"].parent = o["L3_Bound"] o["Data_Sharing"].parent = o["L3_Bound"] o["L3_Hit_Latency"].parent = o["L3_Bound"] o["SQ_Full"].parent = o["L3_Bound"] o["DRAM_Bound"].parent = o["Memory_Bound"] o["MEM_Bandwidth"].parent = o["DRAM_Bound"] o["MEM_Latency"].parent = o["DRAM_Bound"] o["Store_Bound"].parent = o["Memory_Bound"] o["Store_Latency"].parent = o["Store_Bound"] o["False_Sharing"].parent = o["Store_Bound"] o["Split_Stores"].parent = o["Store_Bound"] o["DTLB_Store"].parent = o["Store_Bound"] o["Core_Bound"].parent = o["Backend_Bound"] o["Divider"].parent = o["Core_Bound"] o["Ports_Utilization"].parent = o["Core_Bound"] o["Ports_Utilized_0"].parent = o["Ports_Utilization"] o["Ports_Utilized_1"].parent = o["Ports_Utilization"] o["Ports_Utilized_2"].parent = o["Ports_Utilization"] o["Ports_Utilized_3m"].parent = o["Ports_Utilization"] o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_0"].parent = o["ALU_Op_Utilization"] o["Port_1"].parent = o["ALU_Op_Utilization"] o["Port_5"].parent = o["ALU_Op_Utilization"] o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_2"].parent = o["Load_Op_Utilization"] o["Port_3"].parent = o["Load_Op_Utilization"] o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"] 
o["Port_4"].parent = o["Store_Op_Utilization"] o["Light_Operations"].parent = o["Retiring"] o["FP_Arith"].parent = o["Light_Operations"] o["X87_Use"].parent = o["FP_Arith"] o["FP_Scalar"].parent = o["FP_Arith"] o["FP_Vector"].parent = o["FP_Arith"] o["FP_Vector_128b"].parent = o["FP_Vector"] o["FP_Vector_256b"].parent = o["FP_Vector"] o["Heavy_Operations"].parent = o["Retiring"] o["Microcode_Sequencer"].parent = o["Heavy_Operations"] o["Assists"].parent = o["Microcode_Sequencer"] o["CISC"].parent = o["Microcode_Sequencer"] # user visible metrics n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; 
o["Load_Miss_Real_Latency"] = n n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n n = Metric_Power() ; r.metric(n) ; o["Power"] = n n = Metric_Time() ; r.metric(n) ; o["Time"] = n n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n # references between groups o["ICache_Misses"].ITLB_Misses = o["ITLB_Misses"] o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] 
o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Backend_Bound"].Retiring = o["Retiring"] o["Backend_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Retiring = o["Retiring"] o["Memory_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Backend_Bound = o["Backend_Bound"] o["Memory_Bound"].Fetch_Latency = o["Fetch_Latency"] o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Core_Bound"].Retiring = o["Retiring"] o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Core_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Core_Bound"].Fetch_Latency = o["Fetch_Latency"] o["Ports_Utilization"].Fetch_Latency = o["Fetch_Latency"] o["Ports_Utilized_0"].Fetch_Latency = o["Fetch_Latency"] o["Retiring"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Retiring = o["Retiring"] o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] o["FP_Arith"].FP_Scalar = o["FP_Scalar"] o["FP_Arith"].X87_Use = o["X87_Use"] o["FP_Arith"].FP_Vector = o["FP_Vector"] o["Heavy_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Assists = o["Assists"] o["IpArith"].FP_Vector = o["FP_Vector"] o["IpArith"].FP_Scalar = o["FP_Scalar"] # siblings cross-tree o["MS_Switches"].sibling = (o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],) o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],) o["Fetch_Bandwidth"].sibling = (o["LCP"], 
o["DSB_Switches"],) o["Machine_Clears"].sibling = (o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"], o["Microcode_Sequencer"],) o["L1_Bound"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],) o["DTLB_Load"].sibling = (o["DTLB_Store"],) o["Lock_Latency"].sibling = (o["Store_Latency"],) o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"],) o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["False_Sharing"],) o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["False_Sharing"],) o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],) o["L3_Hit_Latency"].overlap = True o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],) o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],) o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],) o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],) o["Store_Latency"].overlap = True o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"],) o["Split_Stores"].sibling = (o["Port_4"],) o["DTLB_Store"].sibling = (o["DTLB_Load"],) o["Ports_Utilized_1"].sibling = (o["L1_Bound"],) o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Port_4"].sibling = (o["Split_Stores"],) o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Vector"], o["FP_Vector_128b"], 
o["FP_Vector_256b"],) o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"],) o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"],) o["Microcode_Sequencer"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],) o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
127,185
Python
.py
3,171
34.451908
306
0.656546
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,911
interval-merge.py
andikleen_pmu-tools/interval-merge.py
#!/usr/bin/env python3
# merge multiple --perf-output files. requires header
from __future__ import print_function
import csv
import argparse
from collections import OrderedDict, Counter
import sys


def genkey(c, hdr, count):
    """Dedup key for row c: (occurrence count, [timestamp], [location], event).

    count distinguishes repeated samples of the same event within one file,
    so identical rows in a single file are kept apart while matching rows
    across files merge.
    """
    k = [count]
    if 'Timestamp' in hdr:
        k.append(c[hdr['Timestamp']])
    if 'Location' in hdr:
        k.append(c[hdr['Location']])
    k.append(c[hdr['Event']])
    return tuple(k)


def merge_row(d, key, c, hdr):
    """Fold row c into accumulator dict d under key.

    Value and Run-Time are summed across files; Enabled (an enabled-time
    fraction) is averaged. Raises ValueError on unparseable numeric fields.
    """
    if key in d:
        o = d[key]
        o[hdr['Run-Time']] += float(c[hdr['Run-Time']])
        # Average the incoming row's enabled fraction with the accumulated
        # one. (The previous code averaged the old value with itself --
        # (float(o[...]) + o[...]) / 2 -- a no-op that ignored every file
        # after the first.)
        o[hdr['Enabled']] = (float(c[hdr['Enabled']]) + o[hdr['Enabled']]) / 2
        o[hdr['Value']] += float(c[hdr['Value']])
    else:
        d[key] = c
        o = d[key]
        o[hdr['Value']] = float(c[hdr['Value']])
        o[hdr['Enabled']] = float(c[hdr['Enabled']])
        o[hdr['Run-Time']] = float(c[hdr['Run-Time']])


def main():
    """Merge the given toplev --perf-output CSV files onto stdout."""
    ap = argparse.ArgumentParser()
    ap.add_argument('csvfiles', nargs='+', type=argparse.FileType('r'))
    args = ap.parse_args()

    d = OrderedDict()
    hdr = None          # column name -> index, taken from the first header row
    hdrl = None         # the header row itself, echoed to the output
    prev = Counter()    # per (file, timestamp, event) occurrence counter
    for fh in args.csvfiles:
        for c in csv.reader(fh, delimiter=';'):
            if hdr is None:
                hdrl = c
                hdr = {name: idx for idx, name in enumerate(c)}
                continue
            # skip repeated header lines in the second and later files
            if c[0] in ("Timestamp", "Location", "Value"):
                continue
            pkey = (fh, c[hdr['Timestamp']] if 'Timestamp' in hdr else None, c[hdr['Event']])
            prev[pkey] += 1
            try:
                merge_row(d, genkey(c, hdr, prev[pkey]), c, hdr)
            except ValueError as e:
                print("cannot parse", c, e, file=sys.stderr)
    out = csv.writer(sys.stdout, delimiter=';')
    out.writerow(hdrl)
    for row in d.values():
        out.writerow(row)


if __name__ == "__main__":
    main()
1,764
Python
.py
52
26.461538
89
0.552693
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,912
listutils.py
andikleen_pmu-tools/listutils.py
# generic utilities for lists
import sys
from itertools import chain

if sys.version_info.major == 3:
    from itertools import zip_longest
else:
    from itertools import izip_longest
    zip_longest = izip_longest


def flatten(x):
    """Concatenate a sequence of iterables into one flat list."""
    return list(chain(*x))


def filternot(p, l):
    """Return the elements of l for which p(x) is false."""
    return [x for x in l if not p(x)]


# add items from b to a if not already in a
def cat_unique(a, b):
    seen = set(a)
    return a + [x for x in b if x not in seen]


# remove duplicates without reordering
def dedup(a):
    seen = set()
    out = []
    for item in a:
        if item not in seen:
            out.append(item)
            seen.add(item)
    return out


def not_list(l):
    """Elementwise logical negation of l."""
    return [not v for v in l]


# merge two dicts with appending lists
def append_dict(a, b):
    for key, val in b.items():
        if key in a:
            a[key] += val
        else:
            a[key] = val


# create dict/list with same shape as a, but filled with dummy values
def dummy_dict(a, val=0.0):
    return {key: [val] * len(lst) for key, lst in a.items()}


def padlist(l, length, val=0.0):
    """Pad l on the right with val up to at least length items."""
    return l + [val] * (length - len(l)) if len(l) < length else l


def findprefix(l, prefix, stop=None):
    """Index of the first item starting with prefix, scanning until an item
    equal to stop is seen; -1 if no match."""
    for idx, item in enumerate(l):
        if item == stop:
            break
        if item.startswith(prefix):
            return idx
    return -1
1,302
Python
.py
49
21.367347
69
0.607401
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,913
tl-serve.py
andikleen_pmu-tools/tl-serve.py
#!/usr/bin/env python3 # serve toplev csv file as http using dygraph # toplev.py -I100 -o x.csv -v -x, ... # tl-serve.py x.csv [host [port]] from __future__ import print_function import string import argparse import csv import re import os import signal import sys try: import BaseHTTPServer except ImportError: import http.server as BaseHTTPServer import tldata ap = argparse.ArgumentParser(usage="Serve toplev csv file as http or generate in directory") ap.add_argument('csvfile', help='toplev csv file to serve') ap.add_argument('host', nargs='?', default="localhost", help='Hostname to bind to (default localhost)') ap.add_argument('port', nargs='?', default="9001", type=int, help='Port to bind to (default 9001)') ap.add_argument('--verbose', '-v', action='store_true', help='Display all metrics, even if below threshold') ap.add_argument('--gen', help='Generate HTML files in specified directory') ap.add_argument('--title', help='Title for output') args = ap.parse_args() T = string.Template data = tldata.TLData(args.csvfile, args.verbose) data.update() def jsname(n): return n.replace(".", "_").replace("-", "_") def comma_strings(l): return ",".join(['"%s"' % (x) for x in l]) def gen_html_header(): graph = """<html><head><title>Toplev</title> <link rel="shortcut icon" href="toplev.ico" /> <script type="text/javascript" src="dygraph-combined.js"></script> </head> <body> <script type="text/javascript"> var cpus = [""" graph += comma_strings(sorted(data.cpus)) + "]\n" graph += "var nodes = [" + comma_strings(tldata.level_order(data)) + "]" graph += """ var graphs = [] var goptions = [] var num_graphs = 0 var block_redraw = false function enable(el) { for (i = 0; i < cpus.length; i++) { p = document.getElementById("d_" + cpus[i] + "_" + el.name) p.style.display = el.checked ? 'block' : 'none'; } } function change_all(flag) { all_displays = document.getElementsByClassName("disp") for (i = 0; i < all_displays.length; i++) { p = all_displays[i]; p.style.display = flag ? 
'block' : 'none'; } togs = document.getElementsByClassName("toggles") for (i = 0; i < togs.length; i++) { p = togs[i]; p.checked = flag; } } var timer function toggle_refresh(el) { p = document.getElementById("refresh_rate") if (timer) { clearInterval(timer) timer = null } if (el.checked) { timer = setInterval(function () { for (i = 0; i < num_graphs; i++) { graphs[i].updateOptions(goptions[i]) } }, Number(p.value)) } } function add_node(cpu, nd) { p = document.getElementById("d_" + cpus[cpu] + "_" + nodes[nd]) p.parentNode.appendChild(p) } function toggle_interleave(el) { if (!el.checked) { for (cpu = 0; cpu < cpus.length; cpu++) { for (nd = 0; nd < nodes.length; nd++) { add_node(cpu, nd) } } } else { for (nd = 0; nd < nodes.length; nd++) { for (cpu = 0; cpu < cpus.length; cpu++) { add_node(cpu, nd) } } } } function draw_graph(me, initial) { if (block_redraw || initial) return; block_redraw = true xrange = me.xAxisRange() for (i = 0; i < num_graphs; i++) { if (graphs[i] != me) { graphs[i].updateOptions({ dateWindow: xrange, }) } } block_redraw = false } function hilight_help(e, x, pts, row, help) { p = document.getElementById("help") h = "" for (i = 0; i < pts.length; i++) { n = pts[i].name if (n in help && help[n] != "") { h += "<b>" + n + "</b>: " + help[n] + " <br /> " } else { // location.reload(); // XXX } } p.innerHTML = h } function unhilight_help(e, x, pts, row) { p = document.getElementById("help") p.innerHTML = "" } </script> """ if args.title: graph += T("<h1>$title</h1>\n").substitute({"title": args.title}) graph += """ <div><p> <b>Display:</b> """ lev = tldata.level_order(data) for num, name in enumerate(lev): graph += T("""\ <input id="$id" class="toggles" type=checkbox name="$name" onClick="enable(this)" checked /> <label for="$id">$name</label> """).substitute({"id": num, "name": name}) graph += """ <input id="all" type=checkbox name="dall" onClick="change_all(this.checked)" checked /> <label for="all">Toggle all</label> <input id="enable_refresh" 
type=checkbox onClick="toggle_refresh(this)" /> <label for="enable_refresh">Auto-refresh</label> <input id="refresh_rate" type="text" size=4 value="1000" name="refresh" /> <label for="refresh_rate">Refresh rate (ms)</label> <input id="interleave" type=checkbox onClick="toggle_interleave(this)" /> <label for="interleave">Interleave CPUs</label> </p></div> Drag to zoom. Double click to zoom out again<br /> <div id="help" style="position:fixed; right:0; width:300px; font-size: 11"> </div> """ for j in lev: graph += T(""" <script type="text/javascript"> help_$name = { """).substitute({"name": jsname(j)}) for i in data.levels[j]: if i not in data.helptxt: #print(i,"not found in",data.helptxt.keys()) continue graph += T(""" "$name": "$help", """).substitute({"name": get_postfix(i), "help": data.helptxt[i]}) graph += """ } </script> """ return graph def gen_html_cpu(cpu): lev = tldata.level_order(data) graph = "" for name in lev: opts = { "title": name + " " + cpu, "width": 1000, "height": 180, #"xlabel": "time", } if name in data.metrics: unit = None if name in data.units: # XXX handle CPU-METRIC unit = data.units[name] if unit: opts["ylabel"] = unit else: opts["stackedGraph"] = 1 opts["stackedGraphNaNFill"] = "none" opts["ylabel"] = "% CPU time" unit = '%' if unit == '%': opts["valueRange"] = [-5, 110] graph += T(""" <div id="d_${cpu}_$name" class="disp"></div> <script type="text/javascript"> i = num_graphs++ goptions[i] = $opts goptions[i].highlightCallback = function(e, x, pts, row) { hilight_help(e, x, pts, row, help_$jname) } goptions[i].unhighlightCallback = unhilight_help goptions[i].drawCallback = draw_graph graphs[i] = new Dygraph(document.getElementById("d_${cpu}_$name"), "$cpu.$file.csv", goptions[i]) goptions[i]["file"] = "$cpu.$file.csv" </script> """).substitute({"name": name, "jname": jsname(name), "file": name, "cpu": cpu, "opts": opts}) return graph def gen_html(): graph = gen_html_header() for cpu in sorted(data.cpus): graph += gen_html_cpu(cpu) graph 
+= """ </body> </html>""" return graph def get_postfix(s): m = re.match(r'.*\.(.*)', s) if m: return m.group(1) return s def gencsv(wfile, l, cpu): hdr = sorted(data.levels[l]) wr = csv.writer(wfile, lineterminator='\n') wr.writerow(["Timestamp"] + map(get_postfix, hdr)) for val, ts in zip(data.vals, data.times): wr.writerow([ts] + [val[(x, cpu)] if (x, cpu) in val else "" for x in hdr]) class TLHandler(BaseHTTPServer.BaseHTTPRequestHandler): def header(self, typ): self.send_response(200) self.send_header('Content-Type', typ) self.end_headers() def bad(self): self.send_response(401) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write("%s not found" % (self.path)) def serve_file(self, fn, mime): with open(fn, "r") as f: self.header(mime) self.wfile.write(f.read().encode('utf-8')) def do_GET(self): if self.path == "/": self.header("text/html") self.wfile.write(gen_html().encode()) elif self.path == "/dygraph-combined.js": self.serve_file("dygraph-combined.js", "text/javascript") elif self.path == "/toplev.ico": self.serve_file("toplev.ico", "image/x-icon") elif self.path.endswith(".csv"): data.update() m = re.match(r"/(cpu|C\d+|S\d+-C\d+|C\d+-T\d+)\.(.*?)\.csv", self.path) if not m: self.bad() return cpu = m.group(1) l = m.group(2) if l not in data.levels: self.bad() return self.header("text/csv") gencsv(self.wfile, l, cpu) else: self.bad() def copyfile(a, b): with open(a, "rb") as af: with open(b, "wb") as bf: bf.write(af.read()) def term(signal, frame): print("sigterm") sys.exit(0) if args.gen: if not os.path.isdir(args.gen): os.makedirs(args.gen) genfn = os.path.join with open(genfn(args.gen, "index.html"), 'wb') as f: f.write(gen_html().encode()) copyfile('dygraph-combined.js', genfn(args.gen, 'dygraph-combined.js')) copyfile('toplev.ico', genfn(args.gen, 'favicon.ico')) for cpu in data.cpus: for l in data.levels: with open(genfn(args.gen, cpu + "." 
+ l + ".csv"), 'w') as f: gencsv(f, l, cpu) print("Please browse", args.gen, "through a web server, not through file:") else: signal.signal(signal.SIGTERM, term) httpd = BaseHTTPServer.HTTPServer((args.host, args.port), TLHandler) print("serving at",args.host,"port",args.port,"until Ctrl-C") try: httpd.serve_forever() except KeyboardInterrupt: httpd.socket.close()
# NOTE(review): this file's content appears collapsed onto a few physical lines
# by an export step; reviewed in place, code left byte-identical.
# NOTE(review): gencsv() builds the header row as
# ["Timestamp"] + map(get_postfix, hdr); under Python 3 (per the shebang)
# list + map-iterator raises TypeError -- needs list(map(get_postfix, hdr)).
# TODO confirm and fix upstream.
# NOTE(review): TLHandler.bad() does self.wfile.write("%s not found" % (self.path));
# wfile is a binary stream under Python 3's http.server, so writing str raises
# TypeError -- presumably needs .encode(). Verify against Python 3 runtime.
# NOTE(review): do_GET's CSV branch passes the binary wfile straight to
# csv.writer via gencsv(), which writes str -- same Python-2-only pattern;
# confirm whether a text wrapper (io.TextIOWrapper) is needed on Python 3.
9,872
Python
.py
296
26.939189
110
0.573286
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,914
frequency.py
andikleen_pmu-tools/frequency.py
# Frequency metric support for toplev: reports the ratio of actual to
# nominal frequency. nominal_freq is updated by SetupCPU when the CPU's
# nominal frequency is known.
nominal_freq = 1.0


class Frequency:
    # descriptor fields consumed by the toplev runner
    name = "Frequency"
    desc = " Frequency ratio"
    subplot = "Frequency"
    domain = "CoreMetric"

    def compute(self, EV):
        """Set self.val to (cycles / ref-tsc cycles) * nominal_freq.

        Falls back to 0 when the reference counter is zero.
        """
        try:
            ratio = EV("cycles", 1) / EV("CPU_CLK_UNHALTED.REF_TSC", 1)
        except ZeroDivisionError:
            self.val = 0
        else:
            self.val = ratio * nominal_freq


class SetupCPU:
    def __init__(self, r, cpu):
        """Register the Frequency metric with runner r, taking the nominal
        frequency from cpu.freq when it is set (non-zero)."""
        global nominal_freq
        nominal_freq = cpu.freq if cpu.freq else nominal_freq
        r.force_metric(Frequency())
504
Python
.py
17
22.588235
91
0.584711
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,915
gen-dot.py
andikleen_pmu-tools/gen-dot.py
#!/usr/bin/env python3
# generate dot diagram of top down tree from module
from __future__ import print_function
import sys


def has(obj, name):
    """Return True if obj's class defines attribute name directly."""
    return name in obj.__class__.__dict__


class Runner:
    """Collects TopDown tree nodes up to max_level and emits them as dot edges."""

    def __init__(self, max_level=5):
        # max_level was previously a module-level global; parameterizing it
        # keeps the default behavior while making the class reusable/testable.
        self.olist = []
        self.max_level = max_level

    def run(self, n):
        # nodes deeper than max_level are excluded from the diagram
        if n.level <= self.max_level:
            self.olist.append(n)

    def metric(self, n):
        # metrics are not part of the tree; ignore them
        pass

    def finish(self):
        """Print one dot statement per collected node (roots, then edges)."""
        for n in self.olist:
            if n.level == 1:
                print('"%s";' % (n.name))
            elif n.parent:
                print('"%s" -> "%s";' % (n.parent.name, n.name))


def main():
    """Parse [-lN] [simple] from argv, run the model Setup, print the digraph."""
    max_level = 5
    first = 1
    if sys.argv[1:] and sys.argv[1][:2] == "-l":
        max_level = int(sys.argv[1][2:])
        first += 1
    # model modules are imported lazily so only the requested one is loaded
    if len(sys.argv) > first and sys.argv[first] == "simple":
        import simple_ratios
        m = simple_ratios
    else:
        import adl_glc_ratios
        m = adl_glc_ratios

    runner = Runner(max_level)
    m.Setup(runner)
    print(runner.olist, file=sys.stderr)
    print("digraph {")
    print("fontname=\"Courier\";")
    runner.finish()
    print("}")


if __name__ == "__main__":
    main()
1,118
Python
.py
40
22.575
66
0.569561
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,916
clx_server_ratios.py
andikleen_pmu-tools/clx_server_ratios.py
# -*- coding: latin-1 -*- # # auto generated TopDown/TMA 4.8-full-perf description for Intel Xeon Scalable Processors 2nd gen (code named Cascade Lake) # Please see http://ark.intel.com for more details on these CPUs. # # References: # http://bit.ly/tma-ispass14 # http://halobates.de/blog/p/262 # https://sites.google.com/site/analysismethods/yasin-pubs # https://download.01.org/perfmon/ # https://github.com/andikleen/pmu-tools/wiki/toplev-manual # # Helpers print_error = lambda msg: False smt_enabled = False ebs_mode = False version = "4.8-full-perf" base_frequency = -1.0 Memory = 1 Average_Frequency = 0.0 num_cores = 1 num_threads = 1 num_sockets = 1 def handle_error(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 obj.thresh = False def handle_error_metric(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 # Constants Exe_Ports = 8 Mem_L2_Store_Cost = 11 Mem_STLB_Hit_Cost = 9 BAClear_Cost = 9 MS_Switches_Cost = 2 Avg_Assist_Cost = 34 Pipeline_Width = 4 OneMillion = 1000000 OneBillion = 1000000000 Energy_Unit = 61 Errata_Whitelist = "SKL091" EBS_Mode = 0 Memory = 1 PMM_App_Direct = 1 if Memory == 1 else 0 DS = 1 # Aux. 
formulas def Backend_Bound_Cycles(self, EV, level): return EV("CYCLE_ACTIVITY.STALLS_TOTAL", level) + Few_Uops_Executed_Threshold(self, EV, level) + EV("EXE_ACTIVITY.BOUND_ON_STORES", level) def Br_DoI_Jumps(self, EV, level): return EV("BR_INST_RETIRED.NEAR_TAKEN", level) - (EV("BR_INST_RETIRED.COND", level) - EV("BR_INST_RETIRED.NOT_TAKEN", level)) - 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) def Branching_Retired(self, EV, level): return (EV("BR_INST_RETIRED.ALL_BRANCHES", level) + 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("INST_RETIRED.NOP", level)) / SLOTS(self, EV, level) def Serialize_Core(self, EV, level): return self.Core_Bound.compute(EV) * (self.Serializing_Operation.compute(EV) + self.Core_Bound.compute(EV) * EV("RS_EVENTS.EMPTY_CYCLES", level) / CLKS(self, EV, level) * self.Ports_Utilized_0.compute(EV)) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV)) def Umisp(self, EV, level): return 10 * self.Microcode_Sequencer.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV) def Assist(self, EV, level): return (self.Microcode_Sequencer.compute(EV) / (self.Microcode_Sequencer.compute(EV) + self.Few_Uops_Instructions.compute(EV))) * (self.Assists.compute(EV) / self.Microcode_Sequencer.compute(EV)) def Assist_Frontend(self, EV, level): return Assist(self, EV, level) * self.Fetch_Latency.compute(EV) * (self.MS_Switches.compute(EV) + self.Branch_Resteers.compute(EV) * (self.Clears_Resteers.compute(EV) + self.Mispredicts_Resteers.compute(EV) * Umisp(self, EV, level)) / (self.Clears_Resteers.compute(EV) + self.Unknown_Branches.compute(EV) + self.Mispredicts_Resteers.compute(EV))) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) def Assist_Retired(self, EV, level): return Assist(self, EV, level) * self.Heavy_Operations.compute(EV) def 
Core_Bound_Cycles(self, EV, level): return self.Ports_Utilized_0.compute(EV) * CLKS(self, EV, level) + Few_Uops_Executed_Threshold(self, EV, level) def Cycles_1_Port_Utilized(self, EV, level): return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) - EV("UOPS_EXECUTED.CORE_CYCLES_GE_2", level)) / 2 if smt_enabled else EV("EXE_ACTIVITY.1_PORTS_UTIL", level) def Cycles_2_Ports_Utilized(self, EV, level): return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_2", level) - EV("UOPS_EXECUTED.CORE_CYCLES_GE_3", level)) / 2 if smt_enabled else EV("EXE_ACTIVITY.2_PORTS_UTIL", level) def Cycles_3m_Ports_Utilized(self, EV, level): return EV("UOPS_EXECUTED.CORE_CYCLES_GE_3", level) / 2 if smt_enabled else EV("UOPS_EXECUTED.CORE_CYCLES_GE_3", level) def DurationTimeInSeconds(self, EV, level): return EV("interval-ms", 0) / 1000 def Execute_Cycles(self, EV, level): return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.THREAD:c1", level) # factor used for metrics associating fixed costs for FB Hits - according to probability theory if all FB Hits come at a random rate in original L1_Miss cost interval then the average cost for each one is 0.5 of the fixed cost def FB_Factor(self, EV, level): return 1 + FBHit_per_L1Miss(self, EV, level) / 2 def FBHit_per_L1Miss(self, EV, level): return EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("MEM_LOAD_RETIRED.L1_MISS", level) def Fetched_Uops(self, EV, level): return EV("IDQ.DSB_UOPS", level) + EV("IDQ.MITE_UOPS", level) + EV("IDQ.MS_UOPS", level) def Few_Uops_Executed_Threshold(self, EV, level): return EV("EXE_ACTIVITY.1_PORTS_UTIL", level) + self.Retiring.compute(EV) * EV("EXE_ACTIVITY.2_PORTS_UTIL", level) # Floating Point computational (arithmetic) Operations Count def FLOP_Count(self, EV, level): return EV("FP_ARITH_INST_RETIRED.SCALAR", level) + 2 * EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + 4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) + 8 * EV("FP_ARITH_INST_RETIRED.8_FLOPS", level) + 16 * 
EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level) # Floating Point computational (arithmetic) Operations Count def FP_Arith_Scalar(self, EV, level): return EV("FP_ARITH_INST_RETIRED.SCALAR", level) # Floating Point computational (arithmetic) Operations Count def FP_Arith_Vector(self, EV, level): return EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE:u0xfc", level) def HighIPC(self, EV, level): val = IPC(self, EV, level) / Pipeline_Width return val def L2_Bound_Ratio(self, EV, level): return (EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", level) - EV("CYCLE_ACTIVITY.STALLS_L2_MISS", level)) / CLKS(self, EV, level) def Light_Ops_Sum(self, EV, level): return self.FP_Arith.compute(EV) + self.Memory_Operations.compute(EV) + self.Fused_Instructions.compute(EV) + self.Non_Fused_Branches.compute(EV) def LOAD_L2_HIT(self, EV, level): return EV("MEM_LOAD_RETIRED.L2_HIT", level) * (1 + FBHit_per_L1Miss(self, EV, level)) def LOAD_L3_HIT(self, EV, level): return EV("MEM_LOAD_RETIRED.L3_HIT", level) * FB_Factor(self, EV, level) def LOAD_LCL_MEM(self, EV, level): return EV("MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) def LOAD_LCL_PMM(self, EV, level): EV("MEM_LOAD_RETIRED.LOCAL_PMM", level) return EV("MEM_LOAD_RETIRED.LOCAL_PMM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0 def LOAD_RMT_FWD(self, EV, level): EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", level) return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0 def LOAD_RMT_HITM(self, EV, level): EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", level) return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0 def LOAD_RMT_MEM(self, EV, level): EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", level) return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0 def LOAD_RMT_PMM(self, EV, level): EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", 
level) return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0 def LOAD_XSNP_HIT(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT", level) def LOAD_XSNP_HITM(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM", level) def LOAD_XSNP_MISS(self, EV, level): return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", level) def MEM_Bound_Ratio(self, EV, level): return EV("CYCLE_ACTIVITY.STALLS_L3_MISS", level) / CLKS(self, EV, level) + L2_Bound_Ratio(self, EV, level) - self.L2_Bound.compute(EV) def Mem_DDR_Hit_Fraction(self, EV, level): return (19 * LOAD_RMT_MEM(self, EV, level) + 10 *(LOAD_LCL_MEM(self, EV, level) + LOAD_RMT_FWD(self, EV, level) + LOAD_RMT_HITM(self, EV, level))) / ((19 * LOAD_RMT_MEM(self, EV, level) + 10 *(LOAD_LCL_MEM(self, EV, level) + LOAD_RMT_FWD(self, EV, level) + LOAD_RMT_HITM(self, EV, level))) + (25 * LOAD_LCL_PMM(self, EV, level) + 33 * LOAD_RMT_PMM(self, EV, level))) if DS else 1 def Mem_Lock_St_Fraction(self, EV, level): return EV("MEM_INST_RETIRED.LOCK_LOADS", level) / EV("MEM_INST_RETIRED.ALL_STORES", level) def Memory_Bound_Fraction(self, EV, level): return (EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", level) + EV("EXE_ACTIVITY.BOUND_ON_STORES", level)) / Backend_Bound_Cycles(self, EV, level) def Mispred_Clears_Fraction(self, EV, level): return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level)) def OCR_all_rfo_l3_hit_snoop_hitm(self, EV, level): return EV("OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE", level) + EV("OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE", level) def ORO_Demand_RFO_C1(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level ) def ORO_DRD_Any_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", 
level)) , level ) def ORO_DRD_BW_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c4", level)) , level ) def SQ_Full_Cycles(self, EV, level): return (EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) / 2) if smt_enabled else EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) def Store_L2_Hit_Cycles(self, EV, level): return EV("L2_RQSTS.RFO_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level)) def Mem_XSNP_HitM_Cost(self, EV, level): return 22 * Core_Frequency(self, EV, level) def Mem_XSNP_Hit_Cost(self, EV, level): return 20 * Core_Frequency(self, EV, level) def Mem_XSNP_None_Cost(self, EV, level): return 10 * Core_Frequency(self, EV, level) def Mem_Local_DRAM_Cost(self, EV, level): return 80 * Core_Frequency(self, EV, level) def Mem_Remote_DRAM_Cost(self, EV, level): return 147.5 * Core_Frequency(self, EV, level) def Mem_Remote_HitM_Cost(self, EV, level): return 110 * Core_Frequency(self, EV, level) def Mem_Remote_Fwd_Cost(self, EV, level): return 110 * Core_Frequency(self, EV, level) def Mem_L2_Hit_Cost(self, EV, level): return 3.5 * Core_Frequency(self, EV, level) def Recovery_Cycles(self, EV, level): return (EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2) if smt_enabled else EV("INT_MISC.RECOVERY_CYCLES", level) def Retire_Fraction(self, EV, level): return Retired_Slots(self, EV, level) / EV("UOPS_ISSUED.ANY", level) # Retired slots per Logical Processor def Retired_Slots(self, EV, level): return EV("UOPS_RETIRED.RETIRE_SLOTS", level) # Number of logical processors (enabled or online) on the target system def Num_CPUs(self, EV, level): return 112 if smt_enabled else 56 # A system parameter for dependent-loads (pointer chasing like access pattern) of the workload. 
# An integer fraction in range from 0 (no dependent loads) to 100 (all loads
# are dependent loads)
def Dependent_Loads_Weight(self, EV, level):
    return 20

# Total pipeline cost of Branch Misprediction related bottlenecks
def Mispredictions(self, EV, level):
    # Sum of all Fetch_Latency children, used to apportion its cost.
    fe_lat_den = (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) +
                  self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) +
                  self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    val = 100 * (1 - Umisp(self, EV, level)) * (
        self.Branch_Mispredicts.compute(EV) +
        self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / fe_lat_den)
    self.thresh = (val > 20)
    return val

# Total pipeline cost of instruction fetch related bottlenecks by large code
# footprint programs (i-side cache; TLB and BTB misses)
def Big_Code(self, EV, level):
    fe_lat_den = (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) +
                  self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) +
                  self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    val = 100 * self.Fetch_Latency.compute(EV) * (
        self.ITLB_Misses.compute(EV) + self.ICache_Misses.compute(EV) +
        self.Unknown_Branches.compute(EV)) / fe_lat_den
    self.thresh = (val > 20)
    return val

# Total pipeline cost of instruction fetch bandwidth related bottlenecks (when
# the front-end could not sustain operations delivery to the back-end)
def Instruction_Fetch_BW(self, EV, level):
    fe_lat_den = (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) +
                  self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) +
                  self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    val = 100 * (self.Frontend_Bound.compute(EV) -
                 (1 - Umisp(self, EV, level)) * self.Fetch_Latency.compute(EV) *
                 self.Mispredicts_Resteers.compute(EV) / fe_lat_den -
                 Assist_Frontend(self, EV, level)) - Big_Code(self, EV, level)
    self.thresh = (val > 20)
    return val

# Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks
def Cache_Memory_Bandwidth(self, EV, level):
    mem_bound = self.Memory_Bound.compute(EV)
    # Sum of all Memory_Bound children.
    mem_den = (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) +
               self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) +
               self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))
    # Sum of all L3_Bound children.
    l3_den = (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) +
              self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV))
    # Sum of all L1_Bound children.
    l1_den = (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) +
              self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) +
              self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) +
              self.FB_Full.compute(EV))
    dram_part = (mem_bound * (self.DRAM_Bound.compute(EV) / mem_den) *
                 (self.MEM_Bandwidth.compute(EV) /
                  (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV))))
    l3_part = (mem_bound * (self.L3_Bound.compute(EV) / mem_den) *
               (self.SQ_Full.compute(EV) / l3_den))
    l1_part = (mem_bound * (self.L1_Bound.compute(EV) / mem_den) *
               (self.FB_Full.compute(EV) / l1_den))
    val = 100 * (dram_part + l3_part + l1_part)
    self.thresh = (val > 20)
    return val

# Total pipeline cost of external Memory- or Cache-Latency related bottlenecks
def Cache_Memory_Latency(self, EV, level):
    mem_bound = self.Memory_Bound.compute(EV)
    mem_den = (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) +
               self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) +
               self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))
    l3_den = (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) +
              self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV))
    store_den = (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) +
                 self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV))
    l1_den = (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) +
              self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) +
              self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) +
              self.FB_Full.compute(EV))
    dram_part = (mem_bound * (self.DRAM_Bound.compute(EV) / mem_den) *
                 (self.MEM_Latency.compute(EV) /
                  (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV))))
    l3_part = (mem_bound * (self.L3_Bound.compute(EV) / mem_den) *
               (self.L3_Hit_Latency.compute(EV) / l3_den))
    l2_part = mem_bound * self.L2_Bound.compute(EV) / mem_den
    store_part = (mem_bound * (self.Store_Bound.compute(EV) / mem_den) *
                  (self.Store_Latency.compute(EV) / store_den))
    l1_part = (mem_bound * (self.L1_Bound.compute(EV) / mem_den) *
               (self.L1_Hit_Latency.compute(EV) / l1_den))
    val = 100 * (dram_part + l3_part + l2_part + store_part + l1_part)
    self.thresh = (val > 20)
    return val

# Total pipeline cost of Memory Address Translation related bottlenecks
# (data-side TLBs)
def Memory_Data_TLBs(self, EV, level):
    mem_bound = self.Memory_Bound.compute(EV)
    mem_den = (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) +
               self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) +
               self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))
    l1_den = (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) +
              self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) +
              self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) +
              self.FB_Full.compute(EV))
    store_den = (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) +
                 self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV))
    load_part = (mem_bound * (self.L1_Bound.compute(EV) / max(mem_bound, mem_den)) *
                 (self.DTLB_Load.compute(EV) / max(self.L1_Bound.compute(EV), l1_den)))
    store_part = (mem_bound * (self.Store_Bound.compute(EV) / mem_den) *
                  (self.DTLB_Store.compute(EV) / store_den))
    val = 100 * (load_part + store_part)
    self.thresh = (val > 20)
    return val

# Total pipeline cost of Memory Synchronization related bottlenecks (data
# transfers and coherency updates across processors)
def Memory_Synchronization(self, EV, level):
    mem_den = (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) +
               self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) +
               self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))
    l3_den = (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) +
              self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV))
    store_den = (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) +
                 self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV))
    sharing = ((self.L3_Bound.compute(EV) / mem_den) *
               (self.Contested_Accesses.compute(EV) + self.Data_Sharing.compute(EV)) / l3_den +
               (self.Store_Bound.compute(EV) / mem_den) * self.False_Sharing.compute(EV) /
               (store_den - self.Store_Latency.compute(EV)))
    # NOTE(review): the generated expression below reduces to 0 whenever
    # Other_Nukes is non-zero; kept exactly as emitted by the TMA generator.
    clears = self.Machine_Clears.compute(EV) * (1 - self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV)))
    val = 100 * (self.Memory_Bound.compute(EV) * sharing + clears)
    self.thresh = (val > 10)
    return val

# Total pipeline cost when the execution is compute-bound - an estimation.
# Covers Core Bound when High ILP as well as when long-latency execution units
# are busy.
def Compute_Bound_Est(self, EV, level):
    core_bound = self.Core_Bound.compute(EV)
    # Sum of all Core_Bound children.
    core_den = (self.Serializing_Operation.compute(EV) +
                self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))
    # Sum of all Ports_Utilization children.
    ports_den = (self.Ports_Utilized_0.compute(EV) + self.Ports_Utilized_1.compute(EV) +
                 self.Ports_Utilized_2.compute(EV) + self.Ports_Utilized_3m.compute(EV))
    divider_part = core_bound * self.Divider.compute(EV) / core_den
    high_ilp_part = (core_bound * (self.Ports_Utilization.compute(EV) / core_den) *
                     (self.Ports_Utilized_3m.compute(EV) / ports_den))
    val = 100 * (divider_part + high_ilp_part)
    self.thresh = (val > 20)
    return val

# Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time
# with work imbalance multithreaded workloads, overhead in system services or
# virtualized environments)
def Irregular_Overhead(self, EV, level):
    # NOTE(review): Other_Nukes / Other_Nukes reduces to 1; kept as generated.
    nukes = (self.Machine_Clears.compute(EV) * self.Other_Nukes.compute(EV) /
             (self.Other_Nukes.compute(EV)))
    val = 100 * (Assist_Frontend(self, EV, level) +
                 Umisp(self, EV, level) * self.Branch_Mispredicts.compute(EV) +
                 nukes +
                 Serialize_Core(self, EV, level) +
                 Assist_Retired(self, EV, level))
    self.thresh = (val > 10)
    return val

# Total pipeline cost of remaining bottlenecks in the back-end. Examples
# include data-dependencies (Core Bound when Low ILP) and other unlisted
# memory-related stalls.
def Other_Bottlenecks(self, EV, level):
    val = 100 - (Big_Code(self, EV, level) + Instruction_Fetch_BW(self, EV, level) +
                 Mispredictions(self, EV, level) + Cache_Memory_Bandwidth(self, EV, level) +
                 Cache_Memory_Latency(self, EV, level) + Memory_Data_TLBs(self, EV, level) +
                 Memory_Synchronization(self, EV, level) + Compute_Bound_Est(self, EV, level) +
                 Irregular_Overhead(self, EV, level) + Branching_Overhead(self, EV, level) +
                 Useful_Work(self, EV, level))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of instructions used for program control-flow - a subset
# of the Retiring category in TMA. Examples include function calls; loops and
# alignments. (A lower bound).
# Consider Loop Unrolling or function inlining optimizations
def Branching_Overhead(self, EV, level):
    val = 100 * Branching_Retired(self, EV, level)
    self.thresh = (val > 5)
    return val

# Total pipeline cost of "useful operations" - the portion of Retiring category
# not covered by Branching_Overhead nor Irregular_Overhead.
def Useful_Work(self, EV, level):
    val = 100 * (self.Retiring.compute(EV) - Branching_Retired(self, EV, level) -
                 Assist_Retired(self, EV, level))
    self.thresh = (val > 20)
    return val

# Probability of Core Bound bottleneck hidden by SMT-profiling artifacts.
# Tip: consider analysis with SMT disabled
def Core_Bound_Likely(self, EV, level):
    if SMT_2T_Utilization(self, EV, level) > 0.5:
        core_bound = self.Core_Bound.compute(EV)
        ports_util = self.Ports_Utilization.compute(EV)
        val = 100 * (1 - core_bound / ports_util if core_bound < ports_util else 1)
    else:
        val = 0
    self.thresh = (val > 0.5)
    return val

# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)

# Uops Per Instruction
def UopPI(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
    self.thresh = (val > 1.05)
    return val

# Uops per taken branch
def UpTB(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 1.5
    return val

# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
    return 1 / IPC(self, EV, level)

# Per-Logical Processor actual clocks when the Logical Processor is active.
def CLKS(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD", level)

# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor
# ICL onward)
def SLOTS(self, EV, level):
    return Pipeline_Width * CORE_CLKS(self, EV, level)

# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop
# micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_ISSUED.ANY", level)

# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)

# Floating Point Operations Per Cycle
def FLOPc(self, EV, level):
    return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level)

# Actual per-core usage of the Floating Point non-X87 execution units
# (regardless of precision or vector-width). Values > 1 are possible due to
# Fused-Multiply Add FMA counting - common.
def FP_Arith_Utilization(self, EV, level):
    return (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level)) / (2 * CORE_CLKS(self, EV, level))

# Instruction-Level-Parallelism (average number of uops executed when there is
# execution) per thread (logical-processor)
def ILP(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_EXECUTED.THREAD:c1", level)

# uops Executed per Cycle
def EPC(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / CLKS(self, EV, level)

# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
    if ebs_mode:
        return (EV("CPU_CLK_UNHALTED.THREAD", level) / 2) * (
            1 + EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_XCLK", level))
    if smt_enabled:
        return EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2
    return CLKS(self, EV, level)

# Instructions per Load (lower number means higher occurrence rate).
# Tip: reduce memory accesses.
# Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_LOADS", level)
    self.thresh = (val < 3)
    return val

# Instructions per Store (lower number means higher occurrence rate).
# Tip: reduce memory accesses.
# Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)
    self.thresh = (val < 8)
    return val

# Instructions per Branch (lower number means higher occurrence rate)
def IpBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 8)
    return val

# Instructions per (near) call (lower number means higher occurrence rate)
def IpCall(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
    self.thresh = (val < 200)
    return val

# Instructions per taken branch
def IpTB(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 2 + 1
    return val

# Branch instructions per taken branch. Can be used to approximate
# PGO-likelihood for non-loopy codes.
def BpTkBranch(self, EV, level):
    return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)

# Instructions per Floating Point (FP) Operation (lower number means higher
# occurrence rate). Reference: Tuning Performance via Metrics with
# Expectations. https://doi.org/10.1109/LCA.2019.2916408
def IpFLOP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / FLOP_Count(self, EV, level)
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic instruction (lower number means higher
# occurrence rate). Values < 1 are possible due to intentional FMA double
# counting. Approximated prior to BDW.
def IpArith(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level))
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic Scalar Single-Precision instruction (lower
# number means higher occurrence rate). Values < 1 are possible due to
# intentional FMA double counting.
def IpArith_Scalar_SP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level)
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic Scalar Double-Precision instruction (lower
# number means higher occurrence rate). Values < 1 are possible due to
# intentional FMA double counting.
def IpArith_Scalar_DP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level)
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number
# means higher occurrence rate). Values < 1 are possible due to intentional FMA
# double counting.
def IpArith_AVX128(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means
# higher occurrence rate). Values < 1 are possible due to intentional FMA
# double counting.
def IpArith_AVX256(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val

# Instructions per FP Arithmetic AVX 512-bit instruction (lower number means
# higher occurrence rate). Values < 1 are possible due to intentional FMA
# double counting.
def IpArith_AVX512(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val

# Instructions per PAUSE (lower number means higher occurrence rate)
def IpPause(self, EV, level):
    return Instructions(self, EV, level) / EV("ROB_MISC_EVENTS.PAUSE_INST", level)

# Instructions per Software prefetch instruction (of any type:
# NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)
def IpSWPF(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("SW_PREFETCH_ACCESS.T0:u0xF", level)
    self.thresh = (val < 100)
    return val

# Total number of retired Instructions
def Instructions(self, EV, level):
    return EV("INST_RETIRED.ANY", level)

# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
    return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level)

# Instructions per a microcode Assist invocation. See Assists tree node for
# details (lower number means higher occurrence rate)
def IpAssist(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ASSIST.ANY", level) + EV("OTHER_ASSISTS.ANY", level))
    self.thresh = (val < 100000)
    return val

def Execute(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level)

# Average number of uops fetched from DSB per cycle
def Fetch_DSB(self, EV, level):
    return EV("IDQ.DSB_UOPS", level) / EV("IDQ.DSB_CYCLES_ANY", level)

# Average number of uops fetched from MITE per cycle
def Fetch_MITE(self, EV, level):
    return EV("IDQ.MITE_UOPS", level) / EV("IDQ.MITE_CYCLES", level)

# Average number of Uops issued by front-end when it issued something
def Fetch_UpC(self, EV, level):
    return EV("UOPS_ISSUED.ANY", level) / EV("UOPS_ISSUED.ANY:c1", level)

# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See
# section 'Decoded ICache' in Optimization Manual.
# http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val

# Average number of cycles of a switch from the DSB fetch-unit to MITE fetch
# unit - see DSB_Switches tree node for details.
def DSB_Switch_Cost(self, EV, level):
    return EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", level) / EV("DSB2MITE_SWITCHES.COUNT", level)

# Total pipeline cost of DSB (uop cache) misses - subset of the
# Instruction_Fetch_BW Bottleneck.
def DSB_Misses(self, EV, level):
    # Sum of all Fetch_Latency children.
    fe_lat_den = (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) +
                  self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) +
                  self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    val = 100 * (self.Fetch_Latency.compute(EV) * self.DSB_Switches.compute(EV) / fe_lat_den +
                 self.Fetch_Bandwidth.compute(EV) * self.MITE.compute(EV) /
                 (self.MITE.compute(EV) + self.DSB.compute(EV)))
    self.thresh = (val > 10)
    return val

# Total pipeline cost of DSB (uop cache) hits - subset of the
# Instruction_Fetch_BW Bottleneck.
def DSB_Bandwidth(self, EV, level):
    val = 100 * (self.Frontend_Bound.compute(EV) *
                 (self.Fetch_Bandwidth.compute(EV) /
                  (self.Fetch_Bandwidth.compute(EV) + self.Fetch_Latency.compute(EV))) *
                 (self.DSB.compute(EV) / (self.MITE.compute(EV) + self.DSB.compute(EV))))
    self.thresh = (val > 10)
    return val

# Average Latency for L1 instruction cache misses
def ICache_Miss_Latency(self, EV, level):
    return EV("ICACHE_16B.IFDATA_STALL", level) / EV("ICACHE_16B.IFDATA_STALL:c1:e1", level) + 2

# Total pipeline cost of Instruction Cache misses - subset of the Big_Code
# Bottleneck.
def IC_Misses(self, EV, level):
    fe_lat_den = (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) +
                  self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) +
                  self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    val = 100 * (self.Fetch_Latency.compute(EV) * self.ICache_Misses.compute(EV) / fe_lat_den)
    self.thresh = (val > 5)
    return val

# Instructions per non-speculative DSB miss (lower number means higher
# occurrence rate)
def IpDSB_Miss_Ret(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FRONTEND_RETIRED.ANY_DSB_MISS", level)
    self.thresh = (val < 50)
    return val

# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower
# number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
    return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)

# L2 cache true code cacheline misses per kilo instruction
def L2MPKI_Code(self, EV, level):
    return 1000 * EV("FRONTEND_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache speculative code cacheline misses per kilo instruction
def L2MPKI_Code_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.CODE_RD_MISS", level) / EV("INST_RETIRED.ANY", level)

# Number of Instructions per non-speculative Branch Misprediction (JEClear)
# (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 200)
    return val

# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower
# number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    val = Instructions(self, EV, level) / (Retire_Fraction(self, EV, level) * EV("BR_MISP_EXEC.INDIRECT", level))
    self.thresh = (val < 1000)
    return val

# Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative
# branch misprediction (retired JEClear)
def Branch_Misprediction_Cost(self, EV, level):
    return Mispredictions(self, EV, level) * SLOTS(self, EV, level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / 100

# Speculative to Retired ratio of all clears (covering Mispredicts and nukes)
def Spec_Clears_Ratio(self, EV, level):
    return EV("INT_MISC.CLEARS_COUNT", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))

# Fraction of branches that are non-taken conditionals
def Cond_NT(self, EV, level):
    return EV("BR_INST_RETIRED.NOT_TAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are taken conditionals
def Cond_TK(self, EV, level):
    return (EV("BR_INST_RETIRED.CONDITIONAL", level) - EV("BR_INST_RETIRED.NOT_TAKEN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are CALL or RET
def CallRet(self, EV, level):
    return (EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("BR_INST_RETIRED.NEAR_RETURN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are unconditional (direct or indirect) jumps
def Jump(self, EV, level):
    return Br_DoI_Jumps(self, EV, level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Actual Average Latency for L1 data-cache miss demand load operations (in core
# cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / (EV("MEM_LOAD_RETIRED.L1_MISS", level) + EV("MEM_LOAD_RETIRED.FB_HIT", level))

# Memory-Level-Parallelism (average number of L1 miss demand load when there is
# at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)

# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)

# L1 cache true misses per kilo instruction for all demand loads (including
# speculative)
def L1MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.ALL_DEMAND_DATA_RD", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache misses per kilo instruction for all request types (including
# speculative)
def L2MPKI_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache misses per kilo instruction for all demand loads (including
# speculative)
def L2MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level) / EV("INST_RETIRED.ANY", level)

# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("OFFCORE_REQUESTS.DEMAND_RFO", level) / EV("INST_RETIRED.ANY", level)

# L2 cache hits per kilo instruction for all request types (including
# speculative)
def L2HPKI_All(self, EV, level):
    return 1000 * (EV("L2_RQSTS.REFERENCES", level) - EV("L2_RQSTS.MISS", level)) / EV("INST_RETIRED.ANY", level)

# L2 cache hits per kilo instruction for all demand loads (including
# speculative)
def L2HPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level) / EV("INST_RETIRED.ANY", level)

# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)

# Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that
merge into ongoing miss-handling entries) def FB_HPKI(self, EV, level): return 1000 * EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("INST_RETIRED.ANY", level) def L1D_Cache_Fill_BW(self, EV, level): return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level) def L2_Cache_Fill_BW(self, EV, level): return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level) def L3_Cache_Fill_BW(self, EV, level): return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level) def L3_Cache_Access_BW(self, EV, level): return 64 * EV("OFFCORE_REQUESTS.ALL_REQUESTS", level) / OneBillion / Time(self, EV, level) # Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses def Page_Walks_Utilization(self, EV, level): val = (EV("ITLB_MISSES.WALK_PENDING", level) + EV("DTLB_LOAD_MISSES.WALK_PENDING", level) + EV("DTLB_STORE_MISSES.WALK_PENDING", level) + EV("EPT.WALK_PENDING", level)) / (2 * CORE_CLKS(self, EV, level)) self.thresh = (val > 0.5) return val # STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk) def Code_STLB_MPKI(self, EV, level): return 1000 * EV("ITLB_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level) # STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk) def Load_STLB_MPKI(self, EV, level): return 1000 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level) # STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk) def Store_STLB_MPKI(self, EV, level): return 1000 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level) # Average per-core data fill bandwidth to the L1 data cache [GB / sec] def L1D_Cache_Fill_BW_2T(self, EV, level): return L1D_Cache_Fill_BW(self, EV, level) # Average per-core data fill bandwidth to 
the L2 cache [GB / sec] def L2_Cache_Fill_BW_2T(self, EV, level): return L2_Cache_Fill_BW(self, EV, level) # Average per-core data fill bandwidth to the L3 cache [GB / sec] def L3_Cache_Fill_BW_2T(self, EV, level): return L3_Cache_Fill_BW(self, EV, level) # Average per-core data access bandwidth to the L3 cache [GB / sec] def L3_Cache_Access_BW_2T(self, EV, level): return L3_Cache_Access_BW(self, EV, level) # Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory) def L2_Evictions_Silent_PKI(self, EV, level): return 1000 * EV("L2_LINES_OUT.SILENT", level) / Instructions(self, EV, level) # Rate of non silent evictions from the L2 cache per Kilo instruction def L2_Evictions_NonSilent_PKI(self, EV, level): return 1000 * EV("L2_LINES_OUT.NON_SILENT", level) / Instructions(self, EV, level) # Average Latency for L2 cache miss demand Loads def Load_L2_Miss_Latency(self, EV, level): return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level) # Average Parallel L2 cache miss demand Loads def Load_L2_MLP(self, EV, level): return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", level) # Average Parallel L2 cache miss data reads def Data_L2_MLP(self, EV, level): return EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level) # Un-cacheable retired load per kilo instruction def UC_Load_PKI(self, EV, level): return 1000 * EV("MEM_LOAD_MISC_RETIRED.UC", level) / EV("INST_RETIRED.ANY", level) # Average CPU Utilization (percentage) def CPU_Utilization(self, EV, level): return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level) # Average number of utilized CPUs def CPUs_Utilized(self, EV, level): return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0) # Measured Average Core Frequency for unhalted processors 
[GHz] def Core_Frequency(self, EV, level): return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level) # Measured Average Uncore Frequency for the SoC [GHz] def Uncore_Frequency(self, EV, level): return Socket_CLKS(self, EV, level) / 1e9 / Time(self, EV, level) # Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width def GFLOPs(self, EV, level): return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level) # Average Frequency Utilization relative nominal frequency def Turbo_Utilization(self, EV, level): return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level) # Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes. def Power_License0_Utilization(self, EV, level): return EV("CORE_POWER.LVL0_TURBO_LICENSE", level) / 2 / CORE_CLKS(self, EV, level) if smt_enabled else EV("CORE_POWER.LVL0_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level) # Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions. def Power_License1_Utilization(self, EV, level): val = EV("CORE_POWER.LVL1_TURBO_LICENSE", level) / 2 / CORE_CLKS(self, EV, level) if smt_enabled else EV("CORE_POWER.LVL1_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level) self.thresh = (val > 0.5) return val # Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions. 
def Power_License2_Utilization(self, EV, level):
    # With SMT the license event counts both threads, so halve it per core.
    lvl2 = EV("CORE_POWER.LVL2_TURBO_LICENSE", level)
    if smt_enabled:
        val = lvl2 / 2 / CORE_CLKS(self, EV, level)
    else:
        val = lvl2 / CORE_CLKS(self, EV, level)
    self.thresh = (val > 0.5)
    return val

# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    if not smt_enabled:
        return 0
    one_thread = EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level)
    any_thread = EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2
    return 1 - one_thread / any_thread

# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val

# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)

# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    cas = EV("UNC_M_CAS_COUNT.RD", level) + EV("UNC_M_CAS_COUNT.WR", level)
    return (64 * cas / OneBillion) / Time(self, EV, level)

# Average latency of data read request to external memory (in nanoseconds).
# Accounts for demand loads and L1/L2 prefetches.
def MEM_Read_Latency(self, EV, level):
    occupancy_per_req = EV("UNC_ARB_TRK_OCCUPANCY.DATA_READ", level) / EV("UNC_ARB_TRK_REQUESTS.DATA_READ", level)
    uncore_ghz = Socket_CLKS(self, EV, level) / Time(self, EV, level)
    return OneBillion * occupancy_per_req / uncore_ghz

# Average number of parallel data read requests to external memory.
# Accounts for demand loads and L1/L2 prefetches
def MEM_Parallel_Reads(self, EV, level):
    return EV("UNC_ARB_TRK_OCCUPANCY.DATA_READ", level) / EV("UNC_ARB_TRK_OCCUPANCY.DATA_READ:c1", level)

# Average latency of data read request to external 3D X-Point memory [in
# nanoseconds].  Accounts for demand loads and L1/L2 data-read prefetches
def MEM_PMM_Read_Latency(self, EV, level):
    # Bare EV() call primes event collection even when PMM_App_Direct is off.
    EV("UNC_M_PMM_RPQ_OCCUPANCY.ALL", level)
    if not PMM_App_Direct:
        return 0
    occupancy_per_insert = EV("UNC_M_PMM_RPQ_OCCUPANCY.ALL", level) / EV("UNC_M_PMM_RPQ_INSERTS", level)
    return OneBillion * occupancy_per_insert / EV("UNC_M_CLOCKTICKS:one_unit", level)

# Average latency of data read request to external DRAM memory [in
# nanoseconds].  Accounts for demand loads and L1/L2 data-read prefetches
def MEM_DRAM_Read_Latency(self, EV, level):
    occupancy_per_insert = EV("UNC_M_RPQ_OCCUPANCY", level) / EV("UNC_M_RPQ_INSERTS", level)
    return OneBillion * occupancy_per_insert / EV("UNC_M_CLOCKTICKS:one_unit", level)

# Average 3DXP Memory Bandwidth Use for reads [GB / sec]
def PMM_Read_BW(self, EV, level):
    if not PMM_App_Direct:
        return 0
    return (64 * EV("UNC_M_PMM_RPQ_INSERTS", level) / OneBillion) / Time(self, EV, level)

# Average 3DXP Memory Bandwidth Use for Writes [GB / sec]
def PMM_Write_BW(self, EV, level):
    if not PMM_App_Direct:
        return 0
    return (64 * EV("UNC_M_PMM_WPQ_INSERTS", level) / OneBillion) / Time(self, EV, level)

# Average IO (network or disk) Bandwidth Use for Reads [GB / sec].
# Bandwidth of IO reads that are initiated by end device controllers that
# are requesting memory from the CPU
def IO_Read_BW(self, EV, level):
    parts = (EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0", level) +
             EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1", level) +
             EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2", level) +
             EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3", level))
    return parts * 4 / OneBillion / Time(self, EV, level)
# Average IO (network or disk) Bandwidth Use for Writes [GB / sec].
# Bandwidth of IO writes that are initiated by end device controllers that
# are writing memory to the CPU
def IO_Write_BW(self, EV, level):
    parts = (EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0", level) +
             EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1", level) +
             EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2", level) +
             EV("UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3", level))
    return parts * 4 / OneBillion / Time(self, EV, level)

# Run duration time in seconds
def Time(self, EV, level):
    val = EV("interval-s", 0)
    # Very short intervals make the derived metrics unreliable.
    self.thresh = (val < 1)
    return val

# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    return EV("UNC_CHA_CLOCKTICKS:one_unit", level)

# Instructions per Far Branch ( Far Branches apply upon transition from
# application to operating system, handling interrupts, exceptions)
# [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = (val < 1000000)
    return val

# Event groups

class Frontend_Bound:
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_4:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None

    def compute(self, EV):
        # Fraction of issue slots the frontend failed to deliver uops for.
        try:
            val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1)
            self.val = val
            self.thresh = (val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val

    desc = """
This category represents fraction of slots where the
processor's Frontend undersupplies its Backend. Frontend
denotes the first part of the processor core responsible to
fetch operations that are executed later on by the Backend
part. Within the Frontend; a branch predictor predicts the
next address to fetch; cache-lines are fetched from the
memory subsystem; parsed into instructions; and lastly
decoded into micro-operations (uops). Ideally the Frontend
can issue Pipeline_Width uops every cycle to the Backend.
Frontend Bound denotes unutilized issue-slots when there is
no Backend stall; i.e. bubbles where Frontend delivered no
uops while Backend could have accepted them. For example;
stalls due to instruction-cache misses would be categorized
under Frontend Bound."""
class Fetch_Latency:
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_16:pp', 'FRONTEND_RETIRED.LATENCY_GE_8:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        # Whole-width slot loss on cycles with zero uops delivered.
        try:
            val = Pipeline_Width * EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", 2) / SLOTS(self, EV, 2)
            self.val = val
            self.thresh = (val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val

    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend latency issues.  For example; instruction-
cache misses; iTLB misses or fetch stalls after a branch
misprediction are categorized under Frontend Latency. In
such cases; the Frontend eventually delivers no uops for
some period."""


class ICache_Misses:
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.L2_MISS:pp', 'FRONTEND_RETIRED.L1I_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None

    def compute(self, EV):
        try:
            val = (EV("ICACHE_16B.IFDATA_STALL", 3) + 2 * EV("ICACHE_16B.IFDATA_STALL:c1:e1", 3)) / CLKS(self, EV, 3)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val

    desc = """
This metric represents fraction of cycles the CPU was
stalled due to instruction cache misses.. Using compiler's
Profile-Guided Optimization (PGO) can reduce i-cache misses
through improved hot code layout."""


class ITLB_Misses:
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.STLB_MISS:pp', 'FRONTEND_RETIRED.ITLB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None

    def compute(self, EV):
        try:
            val = EV("ICACHE_TAG.STALLS", 3) / CLKS(self, EV, 3)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val

    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Instruction TLB (ITLB) misses.. Consider
large 2M pages for code (selectively prefer hot large-size
function, due to limited 2M entries). Linux options:
standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public
ations/optimizing-function-placement-for-large-scale-data-
center-applications-2/"""


class Branch_Resteers:
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None

    def compute(self, EV):
        # Resteer cycles plus the estimated Unknown_Branches contribution.
        try:
            val = EV("INT_MISC.CLEAR_RESTEER_CYCLES", 3) / CLKS(self, EV, 3) + self.Unknown_Branches.compute(EV)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val

    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers. Branch Resteers estimates
the Frontend delay in fetching operations from corrected
path; following all sorts of miss-predicted branches. For
example; branchy code with lots of miss-predictions might
get categorized under Branch Resteers. Note the value of
this node may overlap with its siblings."""


class Mispredicts_Resteers:
    name = "Mispredicts_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP'])
    maxval = None

    def compute(self, EV):
        try:
            val = Mispred_Clears_Fraction(self, EV, 4) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Mispredicts_Resteers zero division")
        return self.val

    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Branch
Misprediction at execution stage."""


class Clears_Resteers:
    name = "Clears_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'MachineClears'])
    maxval = None

    def compute(self, EV):
        # Complement of the mispredict fraction of resteer cycles.
        try:
            val = (1 - Mispred_Clears_Fraction(self, EV, 4)) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Clears_Resteers zero division")
        return self.val

    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Machine
Clears."""


class Unknown_Branches:
    name = "Unknown_Branches"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['BACLEARS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat'])
    maxval = None

    def compute(self, EV):
        try:
            val = BAClear_Cost * EV("BACLEARS.ANY", 4) / CLKS(self, EV, 4)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Unknown_Branches zero division")
        return self.val

    desc = """
This metric represents fraction of cycles the CPU was
stalled due to new branch address clears. These are fetched
branches the Branch Prediction Unit was unable to recognize
(e.g. first time the branch is fetched or hitting BTB
capacity limit) hence called Unknown Branches"""


class MS_Switches:
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0

    def compute(self, EV):
        try:
            val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val

    desc = """
This metric estimates the fraction of cycles when the CPU
was stalled due to switches of uop delivery to the Microcode
Sequencer (MS). Commonly used instructions are optimized for
delivery by the DSB (decoded i-cache) or MITE (legacy
instruction decode) pipelines. Certain operations cannot be
handled natively by the execution pipeline; and must be
performed by microcode (small programs injected into the
execution stream). Switching to the MS too often can
negatively impact performance. The MS is designated to
deliver long uop flows required by CISC instructions like
CPUID; or uncommon conditions like Floating Point Assists
when dealing with Denormals."""


class LCP:
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None

    def compute(self, EV):
        try:
            val = EV("DECODE.LCP", 3) / CLKS(self, EV, 3)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val

    desc = """
This metric represents fraction of cycles CPU was stalled
due to Length Changing Prefixes (LCPs). Using proper
compiler flags or Intel Compiler by default will certainly
avoid this."""


class DSB_Switches:
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.DSB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None

    def compute(self, EV):
        try:
            val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val

    desc = """
This metric represents fraction of cycles the CPU was
stalled due to switches from DSB to MITE pipelines. The DSB
(decoded i-cache) is a Uop Cache where the front-end
directly delivers Uops (micro operations) avoiding heavy x86
decoding. The DSB pipeline has shorter latency and delivered
higher bandwidth than the MITE (legacy instruction decode
pipeline). Switching between the two pipelines can cause
penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""


class Fetch_Bandwidth:
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_2:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        # Bandwidth is the frontend remainder once latency is accounted.
        try:
            val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)
            self.val = val
            self.thresh = (val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val

    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend bandwidth issues.  For example;
inefficiencies at the instruction decoders; or restrictions
for caching in the DSB (decoded uops cache) are categorized
under Fetch Bandwidth. In such cases; the Frontend typically
delivers suboptimal amount of uops to the Backend."""


class MITE:
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.ANY_DSB_MISS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None

    def compute(self, EV):
        # Cycles MITE was active but not at full 4-uop bandwidth.
        try:
            val = (EV("IDQ.ALL_MITE_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_MITE_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.val = val
            self.thresh = (val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val

    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to the MITE pipeline (the legacy
decode pipeline). This pipeline is used for code that was
not pre-cached in the DSB or LSD. For example;
inefficiencies due to asymmetric decoders; use of long
immediate or LCP can manifest as MITE fetch bandwidth
bottleneck.. Consider tuning codegen of 'small hotspots'
that can fit in DSB. Read about 'Decoded ICache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""


class Decoder0_Alone:
    name = "Decoder0_Alone"
    domain = "Slots_Estimated"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None

    def compute(self, EV):
        try:
            val = (EV("INST_DECODED.DECODERS:c1", 4) - EV("INST_DECODED.DECODERS:c2", 4)) / CORE_CLKS(self, EV, 4) / 2
            self.val = val
            self.thresh = (val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Decoder0_Alone zero division")
        return self.val

    desc = """
This metric represents fraction of cycles where decoder-0
was the only active decoder"""


class DSB:
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None

    def compute(self, EV):
        try:
            val = (EV("IDQ.DSB_CYCLES_ANY", 3) - EV("IDQ.DSB_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.val = val
            self.thresh = (val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val

    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to DSB (decoded uop cache) fetch
pipeline.  For example; inefficient utilization of the DSB
cache structure or bank conflict when reading from it; are
categorized here."""
class Bad_Speculation:
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None

    def compute(self, EV):
        # Issued-but-not-retired uops plus recovery bubbles, in slots.
        try:
            wasted = EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)
            val = wasted / SLOTS(self, EV, 1)
            self.val = val
            self.thresh = (val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val

    desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""


class Branch_Mispredicts:
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        try:
            val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.val = val
            self.thresh = (val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val

    desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction.  These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""


class Other_Mispredicts:
    name = "Other_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'BrMispredicts'])
    maxval = None

    def compute(self, EV):
        # Clamp to a small positive floor, as in the generated model.
        try:
            non_retired_frac = 1 - EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) / (EV("INT_MISC.CLEARS_COUNT", 3) - EV("MACHINE_CLEARS.COUNT", 3))
            val = max(self.Branch_Mispredicts.compute(EV) * non_retired_frac, 0.0001)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Mispredicts zero division")
        return self.val

    desc = """
This metric estimates fraction of slots the CPU was stalled
due to other cases of misprediction (non-retired x86
branches or other types)."""


class Machine_Clears:
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        # Whatever Bad_Speculation is not mispredicts is machine clears.
        try:
            val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)
            self.val = val
            self.thresh = (val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val

    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears.  These slots are either wasted by
uops fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:. https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""


class Other_Nukes:
    name = "Other_Nukes"
    domain = "Slots"
    area = "BAD"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'Machine_Clears'])
    maxval = None

    def compute(self, EV):
        try:
            non_ordering_frac = 1 - EV("MACHINE_CLEARS.MEMORY_ORDERING", 3) / EV("MACHINE_CLEARS.COUNT", 3)
            val = max(self.Machine_Clears.compute(EV) * non_ordering_frac, 0.0001)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Nukes zero division")
        return self.val

    desc = """
This metric represents fraction of slots the CPU has wasted
due to Nukes (Machine Clears) not related to memory
ordering."""


class Backend_Bound:
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None

    def compute(self, EV):
        # Residual of the top-level slot breakdown.
        try:
            issued = (EV("UOPS_ISSUED.ANY", 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            val = 1 - self.Frontend_Bound.compute(EV) - issued
            self.val = val
            self.thresh = (val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val

    desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. Backend is the portion of
the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""


class Memory_Bound:
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None

    def compute(self, EV):
        try:
            val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.val = val
            self.thresh = (val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val

    desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck.  Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""


class L1_Bound:
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT:pp', 'MEM_LOAD_RETIRED.FB_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None

    def compute(self, EV):
        try:
            val = max((EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3), 0)
            self.val = val
            self.thresh = (val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val

    desc = """
This metric estimates how often the CPU was stalled without
loads missing the L1 data cache.  The L1 data cache
typically has the shortest latency.  However; in certain
cases like loads blocked on older stores; a load might
suffer due to high latency even though it is being satisfied
by the L1. Another example is loads who miss in the TLB.
These cases are characterized by execution unit stalls;
while some non-completed demand load lives in the machine
without having that demand load missing the L1 cache."""
class DTLB_Load:
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0

    def compute(self, EV):
        # Estimated TLB cost, capped by the observed L1-related stall window.
        try:
            tlb_cost = Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT:c1", 4) + EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 4)
            l1_window = max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("CYCLE_ACTIVITY.CYCLES_L1D_MISS", 4), 0)
            val = min(tlb_cost, l1_window) / CLKS(self, EV, 4)
            self.val = val
            self.thresh = (val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val

    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages). This includes hitting in the second-level TLB (STLB)
as well as performing a hardware page walk on an STLB
miss.."""


class Load_STLB_Hit:
    name = "Load_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0

    def compute(self, EV):
        try:
            val = self.DTLB_Load.compute(EV) - self.Load_STLB_Miss.compute(EV)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Hit zero division")
        return self.val

    desc = """
This metric roughly estimates the fraction of cycles where
the (first level) DTLB was missed by load accesses, that
later on hit in second-level TLB (STLB)"""


class Load_STLB_Miss:
    name = "Load_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0

    def compute(self, EV):
        try:
            val = EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 5) / CLKS(self, EV, 5)
            self.val = val
            self.thresh = (val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Miss zero division")
        return self.val

    desc = """
This metric estimates the fraction of cycles where the
Second-level TLB (STLB) was missed by load accesses,
performing a hardware page walk"""


class Store_Fwd_Blk:
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0

    def compute(self, EV):
        # 13-cycle penalty estimate per blocked store-forward.
        try:
            val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.val = val
            self.thresh = (val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val

    desc = """
This metric roughly estimates fraction of cycles when the
memory subsystem had loads blocked since they could not
forward data from earlier (in program order) overlapping
stores. To streamline memory operations in the pipeline; a
load can avoid waiting for memory if a prior in-flight store
is writing the data that the load wants to read (store
forwarding process). However; in some cases the load may be
blocked for a significant time pending the store forward.
For example; when the prior store is writing a smaller
region than the load is reading."""


class L1_Hit_Latency:
    name = "L1_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0

    def compute(self, EV):
        try:
            l1_hits = EV("MEM_INST_RETIRED.ALL_LOADS", 4) - EV("MEM_LOAD_RETIRED.FB_HIT", 4) - EV("MEM_LOAD_RETIRED.L1_MISS", 4)
            est = 2 * l1_hits * Dependent_Loads_Weight(self, EV, 4) / 100
            l1_window = max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("CYCLE_ACTIVITY.CYCLES_L1D_MISS", 4), 0)
            val = min(est, l1_window) / CLKS(self, EV, 4)
            self.val = val
            self.thresh = (val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Hit_Latency zero division")
        return self.val

    desc = """
This metric roughly estimates fraction of cycles with demand
load accesses that hit the L1 cache. The short latency of
the L1 data cache may be exposed in pointer-chasing memory
access patterns as an example."""


class Lock_Latency:
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.LOCK_LOADS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0

    def compute(self, EV):
        try:
            l1_locks = 12 * max(0, EV("MEM_INST_RETIRED.LOCK_LOADS", 4) - EV("L2_RQSTS.ALL_RFO", 4))
            offcore = Mem_Lock_St_Fraction(self, EV, 4) * (Mem_L2_Store_Cost * EV("L2_RQSTS.RFO_HIT", 4) + ORO_Demand_RFO_C1(self, EV, 4))
            val = (l1_locks + offcore) / CLKS(self, EV, 4)
            self.val = val
            self.thresh = (val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val

    desc = """
This metric represents fraction of cycles the CPU spent
handling cache misses due to lock operations. Due to the
microarchitecture handling of locks; they are classified as
L1_Bound regardless of what memory source satisfied them."""


class Split_Loads:
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0

    def compute(self, EV):
        try:
            val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4)
            self.val = val
            self.thresh = (val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val

    desc = """
This metric estimates fraction of cycles handling memory
load split accesses - load that cross 64-byte cache line
boundary. . Consider aligning data or hot structure fields.
See the Optimization Manual for more details"""


class G4K_Aliasing:
    name = "4K_Aliasing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None

    def compute(self, EV):
        try:
            val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4)
            self.val = val
            self.thresh = (val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "G4K_Aliasing zero division")
        return self.val

    desc = """
This metric estimates how often memory load accesses were
aliased by preceding stores (in program order) with a 4K
address offset. False match is possible; which incur a few
cycles load re-issue. However; the short re-issue duration
is often hidden by the out-of-order core and HW
optimizations; hence a user may safely ignore a high value
of this metric unless it manages to propagate up into parent
nodes of the hierarchy (e.g. to L1_Bound).. Consider
reducing independent loads/stores accesses with 4K offsets.
See the Optimization Manual for more details"""


class FB_Full:
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None

    def compute(self, EV):
        try:
            val = Load_Miss_Real_Latency(self, EV, 4) * EV("L1D_PEND_MISS.FB_FULL:c1", 4) / CLKS(self, EV, 4)
            self.val = val
            self.thresh = (val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val

    desc = """
This metric does a *rough estimation* of how often L1D Fill
Buffer unavailability limited additional L1D miss memory
access requests to proceed. The higher the metric value; the
deeper the memory hierarchy level the misses are satisfied
from (metric values >1 are valid). Often it hints on
approaching bandwidth limits (to L2 cache; L3 cache or
external memory).. See $issueBW and $issueSL hints. Avoid
software prefetches if indeed memory BW limited."""
Avoid software prefetches if indeed memory BW limited.""" class L2_Bound: name = "L2_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L2_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = (LOAD_L2_HIT(self, EV, 3) / (LOAD_L2_HIT(self, EV, 3) + EV("L1D_PEND_MISS.FB_FULL:c1", 3))) * L2_Bound_Ratio(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L2_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance.""" class L3_Bound: name = "L3_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L3_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = (EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3) - EV("CYCLE_ACTIVITY.STALLS_L3_MISS", 3)) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L3_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. 
L2 misses/L3 hits) can improve the latency and increase performance.""" class Contested_Accesses: name = "Contested_Accesses" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = ((Mem_XSNP_HitM_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HITM(self, EV, 4) + (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_MISS(self, EV, 4)) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Contested_Accesses zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing.""" class Data_Sharing: name = "Data_Sharing" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HIT(self, EV, 4) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Data_Sharing zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. 
Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance.""" class L3_Hit_Latency: name = "L3_Hit_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_RETIRED.L3_HIT:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_XSNP_None_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "L3_Hit_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. 
Note the value of this node may overlap with its siblings.""" class SQ_Full: name = "SQ_Full" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore']) maxval = None def compute(self, EV): try: self.val = SQ_Full_Cycles(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.3) and self.parent.thresh except ZeroDivisionError: handle_error(self, "SQ_Full zero division") return self.val desc = """ This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors).""" class DRAM_Bound: name = "DRAM_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_LOAD_RETIRED.L3_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['MemoryBound', 'TmaL3mem']) maxval = 1.0 def compute(self, EV): try: self.val = (MEM_Bound_Ratio(self, EV, 3) - self.PMM_Bound.compute(EV)) if PMM_App_Direct else MEM_Bound_Ratio(self, EV, 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DRAM_Bound zero division") return self.val desc = """ This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. 
Better caching can improve the latency and increase performance.""" class MEM_Bandwidth: name = "MEM_Bandwidth" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore']) maxval = None def compute(self, EV): try: self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MEM_Bandwidth zero division") return self.val desc = """ This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that).. Improve data accesses to reduce cacheline transfers from/to memory. Examples: 1) Consume all bytes of a each cacheline before it is evicted (e.g. reorder structure elements and split non-hot ones), 2) merge computed-limited with BW-limited loops, 3) NUMA optimizations in multi-socket system. 
Note: software prefetches will not help BW-limited application..""" class MEM_Latency: name = "MEM_Latency" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore']) maxval = None def compute(self, EV): try: self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MEM_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that).. Improve data accesses or interleave them with compute. Examples: 1) Data layout re-structuring, 2) Software Prefetches (also through the compiler)..""" class Local_MEM: name = "Local_MEM" domain = "Clocks_Estimated" area = "BE/Mem" level = 5 htoff = False sample = ['MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM'] errcount = 0 sibling = None metricgroup = frozenset(['Server']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_Local_DRAM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", 5) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Local_MEM zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. 
Caching will improve the latency and increase performance.""" class Remote_MEM: name = "Remote_MEM" domain = "Clocks_Estimated" area = "BE/Mem" level = 5 htoff = False sample = ['MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM:pp'] errcount = 0 sibling = None metricgroup = frozenset(['Server', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_Remote_DRAM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", 5) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) if DS else 0 EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", 5) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Remote_MEM zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations.""" class Remote_Cache: name = "Remote_Cache" domain = "Clocks_Estimated" area = "BE/Mem" level = 5 htoff = False sample = ['MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM:pp', 'MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD:pp'] errcount = 0 sibling = None metricgroup = frozenset(['Offcore', 'Server', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = ((Mem_Remote_HitM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", 5) + (Mem_Remote_Fwd_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", 5)) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) if DS else 0 EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", 5) EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", 5) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Remote_Cache zero division") return self.val desc = """ This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. 
This is caused often due to non-optimal NUMA allocations.""" class PMM_Bound: name = "PMM_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryBound', 'Server', 'TmaL3mem']) maxval = 1.0 def compute(self, EV): try: self.val = (((1 - Mem_DDR_Hit_Fraction(self, EV, 3)) * MEM_Bound_Ratio(self, EV, 3)) if (OneMillion *(EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", 3) + EV("MEM_LOAD_RETIRED.LOCAL_PMM", 3))> EV("MEM_LOAD_RETIRED.L1_MISS", 3)) else 0) if PMM_App_Direct else 0 EV("MEM_LOAD_RETIRED.LOCAL_PMM", 3) EV("MEM_LOAD_RETIRED.L1_MISS", 3) EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "PMM_Bound zero division") return self.val desc = """ This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory by loads, PMM stands for Persistent Memory Module. . Consider moving data-structure from AEP to DDR memory for better latency/bandwidth.""" class Store_Bound: name = "Store_Bound" domain = "Stalls" area = "BE/Mem" level = 3 htoff = False sample = ['MEM_INST_RETIRED.ALL_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset(['MemoryBound', 'TmaL3mem']) maxval = None def compute(self, EV): try: self.val = EV("EXE_ACTIVITY.BOUND_ON_STORES", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Bound zero division") return self.val desc = """ This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. 
This metric will be flagged should RFO stores be a bottleneck.""" class Store_Latency: name = "Store_Latency" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore']) maxval = 1.0 def compute(self, EV): try: self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Consider to avoid/reduce unnecessary (or easily load-able/computable) memory store.""" class False_Sharing: name = "False_Sharing" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM:pp', 'OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE'] errcount = 0 sibling = None metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop']) maxval = 1.0 def compute(self, EV): try: self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * OCR_all_rfo_l3_hit_snoop_hitm(self, EV, 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "False_Sharing zero division") return self.val desc = """ This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. . 
False Sharing can be easily avoided by padding to make Logical Processors access different lines.""" class Split_Stores: name = "Split_Stores" domain = "Core_Utilization" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.SPLIT_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("MEM_INST_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Split_Stores zero division") return self.val desc = """ This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity.""" class DTLB_Store: name = "DTLB_Store" domain = "Clocks_Estimated" area = "BE/Mem" level = 4 htoff = False sample = ['MEM_INST_RETIRED.STLB_MISS_STORES:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvMT', 'MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT:c1", 4) + EV("DTLB_STORE_MISSES.WALK_ACTIVE", 4)) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "DTLB_Store zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. 
Try using larger page sizes for large amounts of frequently- used data.""" class Store_STLB_Hit: name = "Store_STLB_Hit" domain = "Clocks_Estimated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = self.DTLB_Store.compute(EV) - self.Store_STLB_Miss.compute(EV) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_STLB_Hit zero division") return self.val desc = """ This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second- level TLB (STLB)""" class Store_STLB_Miss: name = "Store_STLB_Miss" domain = "Clocks_Calculated" area = "BE/Mem" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['MemoryTLB']) maxval = 1.0 def compute(self, EV): try: self.val = EV("DTLB_STORE_MISSES.WALK_ACTIVE", 5) / CORE_CLKS(self, EV, 5) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Store_STLB_Miss zero division") return self.val desc = """ This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk""" class Core_Bound: name = "Core_Bound" domain = "Slots" area = "BE/Core" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Backend', 'TmaL2', 'Compute']) maxval = None def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Core_Bound zero division") return self.val desc = """ This metric represents fraction of slots where Core non- memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. 
Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).. Tip: consider Port Saturation analysis as next step.""" class Divider: name = "Divider" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = ['ARITH.DIVIDER_ACTIVE'] errcount = 0 sibling = None metricgroup = frozenset(['BvCB']) maxval = 1.0 def compute(self, EV): try: self.val = EV("ARITH.DIVIDER_ACTIVE", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Divider zero division") return self.val desc = """ This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.""" class Serializing_Operation: name = "Serializing_Operation" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = ['PARTIAL_RAT_STALLS.SCOREBOARD'] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'PortsUtil']) maxval = None def compute(self, EV): try: self.val = EV("PARTIAL_RAT_STALLS.SCOREBOARD", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Serializing_Operation zero division") return self.val desc = """ This metric represents fraction of cycles the CPU issue- pipeline was stalled due to serializing operations. 
Instructions like CPUID; WRMSR or LFENCE serialize the out- of-order execution which may limit performance.""" class Slow_Pause: name = "Slow_Pause" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = ['ROB_MISC_EVENTS.PAUSE_INST'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = 40 * EV("ROB_MISC_EVENTS.PAUSE_INST", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Slow_Pause zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions.""" class Ports_Utilization: name = "Ports_Utilization" domain = "Clocks" area = "BE/Core" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Core_Bound_Cycles(self, EV, 3) / CLKS(self, EV, 3) if (EV("ARITH.DIVIDER_ACTIVE", 3)<(EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) - EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3))) else Few_Uops_Executed_Threshold(self, EV, 3) / CLKS(self, EV, 3) EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3) EV("ARITH.DIVIDER_ACTIVE", 3) EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilization zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.. 
Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop.""" class Ports_Utilized_0: name = "Ports_Utilized_0" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = EV("EXE_ACTIVITY.EXE_BOUND_0_PORTS", 4) / CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_0 zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.. Check assembly view and Appendix C in Optimization Manual to find out instructions with say 5 or more cycles latency.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Mixing_Vectors: name = "Mixing_Vectors" domain = "Clocks" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = 1.0 def compute(self, EV): try: self.val = EV("UOPS_ISSUED.VECTOR_WIDTH_MISMATCH", 5) / EV("UOPS_ISSUED.ANY", 5) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Mixing_Vectors zero division") return self.val desc = """ This metric estimates penalty in terms of percentage of injected blend uops out of all Uops Issued -- the Count Domain. Usually a Mixing_Vectors over 5% is worth investigating. 
Read more in Appendix B1 of the Optimizations Guide for this topic.""" class Ports_Utilized_1: name = "Ports_Utilized_1" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_1_Port_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.2) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_1 zero division") return self.val desc = """ This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful.""" class Ports_Utilized_2: name = "Ports_Utilized_2" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_2_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.15) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_2 zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). 
Loop Vectorization -most compilers feature auto- Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop.""" class Ports_Utilized_3m: name = "Ports_Utilized_3m" domain = "Clocks" area = "BE/Core" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvCB', 'PortsUtil']) maxval = None def compute(self, EV): try: self.val = Cycles_3m_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4) self.thresh = (self.val > 0.4) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Ports_Utilized_3m zero division") return self.val desc = """ This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).""" class ALU_Op_Utilization: name = "ALU_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("UOPS_DISPATCHED_PORT.PORT_0", 5) + EV("UOPS_DISPATCHED_PORT.PORT_1", 5) + EV("UOPS_DISPATCHED_PORT.PORT_5", 5) + EV("UOPS_DISPATCHED_PORT.PORT_6", 5)) / (4 * CORE_CLKS(self, EV, 5)) self.thresh = (self.val > 0.4) except ZeroDivisionError: handle_error(self, "ALU_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.""" class Port_0: name = "Port_0" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_0'] errcount = 0 sibling = None metricgroup = frozenset(['Compute']) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_0", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_0 zero division") return self.val desc = """ This metric represents Core fraction of cycles 
CPU dispatched uops on execution port 0 ALU and 2nd branch""" class Port_1: name = "Port_1" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_1'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_1", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_1 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)""" class Port_5: name = "Port_5" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_5'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_5", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_5 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ALU. See section 'Handling Port 5 Pressure' in Optimization Manual:. 
http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html"""

# Auto-generated TMA tree nodes. Each class is one node of the TopDown
# hierarchy: compute(EV) evaluates the node's formula -- EV(event, level)
# reads a perf-event count -- stores the ratio in self.val, sets
# self.thresh (significance flag; deeper levels gate on the parent node),
# and routes zero divisions to handle_error(), then returns self.val.

class Port_6:
    name = "Port_6"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_6']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Fraction of core clocks with a uop dispatched on port 6.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_6", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_6 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched
uops on execution port 6 Primary Branch and simple ALU"""

class Load_Op_Utilization:
    name = "Load_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Ports 2/3/7 dispatch loads and store-address uops; subtracting
            # port 4 (store-data) presumably cancels the store-address share
            # (one store-data per store) -- TODO confirm against TMA spec.
            # Divided by 2*CORE_CLKS since two load ports exist.
            self.val = (EV("UOPS_DISPATCHED_PORT.PORT_2", 5) + EV("UOPS_DISPATCHED_PORT.PORT_3", 5) + EV("UOPS_DISPATCHED_PORT.PORT_7", 5) - EV("UOPS_DISPATCHED_PORT.PORT_4", 5)) / (2 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Load_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched
uops on execution port for Load operations"""

class Port_2:
    name = "Port_2"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_2']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_2", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_2 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched
uops on execution port 2 Loads and Store-address"""

class Port_3:
    name = "Port_3"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_3", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_3 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched
uops on execution port 3 Loads and Store-address"""

class Store_Op_Utilization:
    name = "Store_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Store-data dispatches on port 4 only.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 5) / CORE_CLKS(self, EV, 5)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Store_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched
uops on execution port for Store operations"""

class Port_4:
    name = "Port_4"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_4']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_4 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched
uops on execution port 4 (Store-data)"""

class Port_7:
    name = "Port_7"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_7']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_7", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_7 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched
uops on execution port 7 simple Store-address"""

class Retiring:
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.RETIRE_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            # Top-level category: retired uop slots out of all issue slots.
            self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1)
            # Also flagged when the Heavy_Operations child node is flagged.
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """
This category represents fraction of slots utilized by
useful work i.e. issued uops that eventually get retired.
Ideally; all pipeline slots would be attributed to the
Retiring category.  Retiring of 100% would indicate the
maximum Pipeline_Width throughput was achieved.  Maximizing
Retiring typically increases the Instructions-per-cycle (see
IPC metric). Note that a high Retiring value does not
necessary mean there is no room for more performance.  For
example; Heavy-operations or Microcode Assists are
categorized under Retiring. They often indicate suboptimal
performance and can often be optimized or avoided. . A high
Retiring value for non-vectorized code may be a good hint
for programmer to consider vectorizing his code.
Doing so essentially lets more computations be done without
significantly increasing number of instructions thus
improving the performance."""

# TMA tree nodes (continued): Retiring sub-tree. compute(EV) fills
# self.val / self.thresh and returns self.val; zero divisions are
# reported through handle_error().

class Light_Operations:
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # Light = all retiring minus the heavy (multi-uop/microcoded) part.
            self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring light-weight operations -- instructions that
require no more than one uop (micro-operation). This
correlates with total number of instructions used by the
program. A uops-per-instruction (see UopPI metric) ratio of
1 or less should be expected for decently optimized code
running on Intel Core/Xeon products. While this often
indicates efficient X86 instructions were executed; high
value does not necessarily mean better performance cannot be
achieved. . Focus on techniques that reduce instruction
count or result in more efficient instructions generation
such as vectorization."""

class FP_Arith:
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            # Sum of the three FP child nodes; "Uops" domain, so the sum may
            # exceed the parent (see desc).
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """
This metric represents overall arithmetic floating-point
(FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use of
\"Uops\" CountDomain and FMA double-counting."""

class X87_Use:
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            # Scale Retiring by the executed-uops share that was x87.
            self.val = self.Retiring.compute(EV) * EV("UOPS_EXECUTED.X87", 4) / EV("UOPS_EXECUTED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """
This metric serves as an approximation of legacy x87 usage.
It accounts for instructions beyond X87 FP arithmetic
operations; hence may be used as a thermometer to avoid X87
high usage and preferably upgrade to modern ISA. See Tip
under Tuning Hint.. Tip: consider compiler flags to generate
newer AVX (or SSE) instruction sets; which typically perform
better and feature vectors."""

class FP_Scalar:
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
scalar uops fraction the CPU has retired. May overcount due
to FMA double counting..
Investigate what limits (compiler) generation of vector
code."""

class FP_Vector:
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
vector uops fraction the CPU has retired aggregated across
all vector widths. May overcount due to FMA double
counting.. Check if vector width is expected"""

class FP_Vector_128b:
    name = "FP_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_128b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 128-bit wide vectors. May overcount
due to FMA double counting..
Try to exploit wider vector length"""

class FP_Vector_256b:
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""

class FP_Vector_512b:
    name = "FP_Vector_512b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_512b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 512-bit wide vectors.
May overcount due to FMA double counting."""

class Memory_Operations:
    name = "Memory_Operations"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            # Scale Light_Operations by the memory-instruction share.
            self.val = self.Light_Operations.compute(EV) * EV("MEM_INST_RETIRED.ANY", 3) / EV("INST_RETIRED.ANY", 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring memory operations -- uops for memory load or store
accesses."""

class Fused_Instructions:
    name = "Fused_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("UOPS_RETIRED.MACRO_FUSED", 3) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fused_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring fused instructions -- where one uop can represent
multiple contiguous instructions. CMP+JCC or DEC+JCC are
common examples of legacy fusions. {}.
See section 'Optimizing for Macro-fusion' in Optimization
Manual:"""

# TMA tree nodes (continued): remaining Retiring sub-tree, including
# the Heavy_Operations / Microcode_Sequencer branch.

class Non_Fused_Branches:
    name = "Non_Fused_Branches"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            # Branches that did not macro-fuse: all branches minus fused uops.
            self.val = self.Light_Operations.compute(EV) * (EV("BR_INST_RETIRED.ALL_BRANCHES", 3) - EV("UOPS_RETIRED.MACRO_FUSED", 3)) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Non_Fused_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring branch instructions that were not fused. Non-
conditional branches like direct JMP or CALL would count
here. Can be used to examine fusible conditional jumps that
were not fused."""

class Other_Light_Ops:
    name = "Other_Light_Ops"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            # Residual: Light_Operations not claimed by sibling nodes;
            # clamped at zero since the siblings may overcount.
            self.val = max(0, self.Light_Operations.compute(EV) - Light_Ops_Sum(self, EV, 3))
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Light_Ops zero division")
        return self.val
    desc = """
This metric represents the remaining light uops fraction the
CPU has executed - remaining means not covered by other
sibling nodes.
May undercount due to FMA double counting"""

class Nop_Instructions:
    name = "Nop_Instructions"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = ['INST_RETIRED.NOP']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.NOP", 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Nop_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring NOP (no op) instructions. Compilers often use NOPs
for certain address alignments - e.g. start address of a
function or loop body.. Improve Codegen by correctly placing
NOPs outside hot sections (e.g. outside loop body)."""

class Heavy_Operations:
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # Uops beyond one per instruction: retired slots plus fused uops
            # minus instruction count, as a fraction of all slots.
            self.val = (Retired_Slots(self, EV, 2) + EV("UOPS_RETIRED.MACRO_FUSED", 2) - EV("INST_RETIRED.ANY", 2)) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that
require two or more uops or micro-coded sequences.
This highly-correlates with the uop length of these
instructions/sequences."""

class Few_Uops_Instructions:
    name = "Few_Uops_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Heavy operations that are NOT microcode-sequenced.
            self.val = self.Heavy_Operations.compute(EV) - self.Microcode_Sequencer.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Few_Uops_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring instructions that that are decoder into two or up
to four uops. This highly-correlates with the number of uops
in such instructions."""

class Microcode_Sequencer:
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = ['IDQ.MS_UOPS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None
    def compute(self, EV):
        try:
            # MS-delivered uops scaled by the retired fraction (Retire_Fraction
            # converts issued uops to retired slots -- see helper).
            self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was
retiring uops fetched by the Microcode Sequencer (MS) unit.
The MS is used for CISC instructions not supported by the
default decoders (like repeat move strings; or CPUID); or by
microcode assists used to address some operation modes (like
in Floating Point assists).
These cases can often be avoided.."""

class Assists:
    name = "Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 4
    htoff = False
    sample = ['OTHER_ASSISTS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Estimated: Avg_Assist_Cost slots charged per assist event.
            self.val = Avg_Assist_Cost * (EV("FP_ASSIST.ANY", 4) + EV("OTHER_ASSISTS.ANY", 4)) / SLOTS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops
delivered by the Microcode_Sequencer as a result of Assists.
Assists are long sequences of uops that are required in
certain corner-cases for operations that cannot be handled
natively by the execution pipeline. For example; when
working with very small floating point values (so-called
Denormals); the FP units are not set up to perform these
operations natively. Instead; a sequence of instructions to
perform the computation on the Denormals is injected into
the pipeline. Since these microcode sequences might be
dozens of uops long; Assists can be extremely deleterious to
performance and they can be avoided in many cases."""

class FP_Assists:
    name = "FP_Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            # Estimated cost of 34 slots per FP assist (generated constant).
            self.val = 34 * EV("FP_ASSIST.ANY", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "FP_Assists zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of slots the CPU
retired uops as a result of handing Floating Point (FP)
Assists. FP Assist may apply when working with very small
floating point values (so-called Denormals).. Consider DAZ
(Denormals Are Zero) and/or FTZ (Flush To Zero) options in
your compiler; \"-ffast-math\" with -O2 in GCC for example.
This option may improve performance if the denormal values
are not critical in your application. Also note that the DAZ
and FTZ modes are not compatible with the IEEE Standard
754.. https://www.intel.com/content/www/us/en/develop/docume
ntation/vtune-help/top/reference/cpu-metrics-reference/bad-
speculation-back-end-bound-pipeline-slots/fp-assists.html"""

class CISC:
    name = "CISC"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # MS slots not attributed to Assists; clamped at zero.
            self.val = max(0, self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "CISC zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU retired
uops originated from CISC (complex instruction set computer)
instruction. A CISC instruction has multiple uops that are
required to perform the instruction's functionality as in
the case of read-modify-write as an example.
Since these instructions require multiple uops they may or
may not imply sub-optimal use of machine resources."""

# Info.Bottleneck metrics. Unlike the tree nodes above, these Metric_*
# classes have no level/parent, their compute(EV) does not return a
# value (it only sets self.val / self.thresh), and failures go through
# handle_error_metric().

class Metric_Mispredictions:
    name = "Mispredictions"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts', 'BvMP'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Mispredictions(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Mispredictions zero division")
    desc = """
Total pipeline cost of Branch Misprediction related
bottlenecks"""

class Metric_Big_Code:
    name = "Big_Code"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBC', 'BigFootprint', 'Fed', 'Frontend', 'IcMiss', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Big_Code(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Big_Code zero division")
    desc = """
Total pipeline cost of instruction fetch related bottlenecks
by large code footprint programs (i-side cache; TLB and BTB
misses)"""

class Metric_Instruction_Fetch_BW:
    name = "Instruction_Fetch_BW"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvFB', 'Fed', 'FetchBW', 'Frontend'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instruction_Fetch_BW(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Instruction_Fetch_BW zero division")
    desc = """
Total pipeline cost of instruction fetch bandwidth related
bottlenecks (when the front-end could not sustain operations
delivery to the back-end)"""

class Metric_Cache_Memory_Bandwidth:
    name = "Cache_Memory_Bandwidth"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMB', 'Mem', 'MemoryBW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cache_Memory_Bandwidth(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Bandwidth zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Bandwidth
related bottlenecks"""

class Metric_Cache_Memory_Latency:
    name = "Cache_Memory_Latency"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvML', 'Mem', 'MemoryLat', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cache_Memory_Latency(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Latency zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Latency
related bottlenecks"""

class Metric_Memory_Data_TLBs:
    name = "Memory_Data_TLBs"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMT', 'Mem', 'MemoryTLB', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Memory_Data_TLBs(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Data_TLBs zero division")
    desc = """
Total pipeline cost of Memory Address Translation related
bottlenecks (data-side TLBs)"""

class Metric_Memory_Synchronization:
    name = "Memory_Synchronization"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMS', 'Mem', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Memory_Synchronization(self, EV, 0)
            # Lower threshold (10) than most bottleneck metrics.
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Synchronization zero division")
    desc = """
Total pipeline cost of Memory Synchronization related
bottlenecks (data transfers and coherency updates across
processors)"""

class Metric_Compute_Bound_Est:
    name = "Compute_Bound_Est"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvCB', 'Cor'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Compute_Bound_Est(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Compute_Bound_Est zero division")
    desc = """
Total pipeline cost when the execution is compute-bound - an
estimation. Covers Core Bound when High ILP as well as when
long-latency execution units are busy."""

class Metric_Irregular_Overhead:
    name = "Irregular_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BvIO', 'Cor', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Irregular_Overhead(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Irregular_Overhead zero division")
    desc = """
Total pipeline cost of irregular execution (e.g. FP-assists
in HPC, Wait time with work imbalance multithreaded
workloads, overhead in system services or virtualized
environments)"""

class Metric_Other_Bottlenecks:
    name = "Other_Bottlenecks"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvOB', 'Cor', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Other_Bottlenecks(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Other_Bottlenecks zero division")
    desc = """
Total pipeline cost of remaining bottlenecks in the back-
end. Examples include data-dependencies (Core Bound when Low
ILP) and other unlisted memory-related stalls."""

class Metric_Branching_Overhead:
    name = "Branching_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBO', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Branching_Overhead(self, EV, 0)
            self.thresh = (self.val > 5)
        except ZeroDivisionError:
            handle_error_metric(self, "Branching_Overhead zero division")
    desc = """
Total pipeline cost of instructions used for program
control-flow - a subset of the Retiring category in TMA.
Examples include function calls; loops and alignments.
(A lower bound). Consider Loop Unrolling or function
inlining optimizations"""

class Metric_Useful_Work:
    name = "Useful_Work"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvUW', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Useful_Work(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Useful_Work zero division")
    desc = """
Total pipeline cost of \"useful operations\" - the portion
of Retiring category not covered by Branching_Overhead nor
Irregular_Overhead."""

class Metric_Core_Bound_Likely:
    name = "Core_Bound_Likely"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Botlnk.L0"
    metricgroup = frozenset(['Cor', 'SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Core_Bound_Likely(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Bound_Likely zero division")
    desc = """
Probability of Core Bound bottleneck hidden by SMT-profiling
artifacts.
Tip: consider analysis with SMT disabled"""

# Informational metrics (Info.Thread / Info.Core / Info.Inst_Mix).
# compute(EV) sets self.val / self.thresh only; no return value.

class Metric_IPC:
    name = "IPC"
    domain = "Metric"
    # Sanity bound: IPC cannot meaningfully exceed issue width plus slack.
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Ret', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IPC zero division")
    desc = """
Instructions Per Cycle (per Logical Processor)"""

class Metric_UopPI:
    name = "UopPI"
    domain = "Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UopPI(self, EV, 0)
            self.thresh = (self.val > 1.05)
        except ZeroDivisionError:
            handle_error_metric(self, "UopPI zero division")
    desc = """
Uops Per Instruction"""

class Metric_UpTB:
    name = "UpTB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 1.5
        except ZeroDivisionError:
            handle_error_metric(self, "UpTB zero division")
    desc = """
Uops per taken branch"""

class Metric_CPI:
    name = "CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPI zero division")
    desc = """
Cycles Per Instruction (per Logical Processor)"""

class Metric_CLKS:
    name = "CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CLKS zero division")
    desc = """
Per-Logical Processor actual clocks when the Logical
Processor is active."""

class Metric_SLOTS:
    name = "SLOTS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SLOTS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SLOTS zero division")
    desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""

class Metric_Execute_per_Issue:
    name = "Execute_per_Issue"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Cor', 'Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute_per_Issue(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute_per_Issue zero division")
    desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests
high rate of uop micro-fusions. Ratio < 1 suggest high rate
of \"execute\" at rename stage."""

class Metric_CoreIPC:
    name = "CoreIPC"
    domain = "Core_Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CoreIPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CoreIPC zero division")
    desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""

class Metric_FLOPc:
    name = "FLOPc"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'Flops'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FLOPc(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FLOPc zero division")
    desc = """
Floating Point Operations Per Cycle"""

class Metric_FP_Arith_Utilization:
    name = "FP_Arith_Utilization"
    domain = "Core_Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FP_Arith_Utilization zero division")
    desc = """
Actual per-core usage of the Floating Point non-X87
execution units (regardless of precision or vector-width).
Values > 1 are possible due to Fused-Multiply Add FMA
counting - common."""

class Metric_ILP:
    name = "ILP"
    domain = "Metric"
    # Bounded by the number of execution ports.
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = ILP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "ILP zero division")
    desc = """
Instruction-Level-Parallelism (average number of uops
executed when there is execution) per thread (logical-
processor)"""

class Metric_EPC:
    name = "EPC"
    domain = "Metric"
    maxval = 20.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = EPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "EPC zero division")
    desc = """
uops Executed per Cycle"""

class Metric_CORE_CLKS:
    name = "CORE_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CORE_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CORE_CLKS zero division")
    desc = """
Core actual clocks when any Logical Processor is active on
the Physical Core"""

class Metric_IpLoad:
    name = "IpLoad"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpLoad(self, EV, 0)
            self.thresh = (self.val < 3)
        except ZeroDivisionError:
            handle_error_metric(self, "IpLoad zero division")
    desc = """
Instructions per Load (lower number means higher occurrence
rate).
Tip: reduce memory accesses."""

class Metric_IpStore:
    name = "IpStore"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpStore(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpStore zero division")
    desc = """
Instructions per Store (lower number means higher occurrence
rate). Tip: reduce memory accesses."""

class Metric_IpBranch:
    name = "IpBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpBranch(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpBranch zero division")
    desc = """
Instructions per Branch (lower number means higher
occurrence rate)"""

class Metric_IpCall:
    name = "IpCall"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpCall(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpCall zero division")
    desc = """
Instructions per (near) call (lower number means higher
occurrence rate)"""

class Metric_IpTB:
    name = "IpTB"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 2 + 1
        except ZeroDivisionError:
            handle_error_metric(self, "IpTB zero division")
    desc = """
Instructions per taken branch"""

class Metric_BpTkBranch:
    name = "BpTkBranch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = BpTkBranch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "BpTkBranch zero division")
    desc = """
Branch instructions per taken branch. . Can be used to
approximate PGO-likelihood for non-loopy codes."""

class Metric_IpFLOP:
    name = "IpFLOP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpFLOP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFLOP zero division")
    desc = """
Instructions per Floating Point (FP) Operation (lower number
means higher occurrence rate). Reference: Tuning Performance
via Metrics with Expectations.
https://doi.org/10.1109/LCA.2019.2916408"""

class Metric_IpArith:
    name = "IpArith"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith zero division")
    desc = """
Instructions per FP Arithmetic instruction (lower number
means higher occurrence rate). Values < 1 are possible due
to intentional FMA double counting. Approximated prior to
BDW."""

class Metric_IpArith_Scalar_SP:
    name = "IpArith_Scalar_SP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_SP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_SP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Single-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_Scalar_DP: name = "IpArith_Scalar_DP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpScalar', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_Scalar_DP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_Scalar_DP zero division") desc = """ Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_AVX128: name = "IpArith_AVX128" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX128(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX128 zero division") desc = """ Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_AVX256: name = "IpArith_AVX256" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX256(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX256 zero division") desc = """ Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). 
Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpArith_AVX512: name = "IpArith_AVX512" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX512(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX512 zero division") desc = """ Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.""" class Metric_IpPause: name = "IpPause" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpPause(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpPause zero division") desc = """ Instructions per PAUSE (lower number means higher occurrence rate)""" class Metric_IpSWPF: name = "IpSWPF" domain = "Inst_Metric" maxval = 1000 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Prefetches']) sibling = None def compute(self, EV): try: self.val = IpSWPF(self, EV, 0) self.thresh = (self.val < 100) except ZeroDivisionError: handle_error_metric(self, "IpSWPF zero division") desc = """ Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)""" class Metric_Instructions: name = "Instructions" domain = "Count" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Summary', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = Instructions(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Instructions zero division") desc = """ Total number of retired Instructions""" class Metric_Retire: name = "Retire" domain = "Metric" maxval = 0 errcount = 0 area = 
"Info.Pipeline" metricgroup = frozenset(['Pipeline', 'Ret']) sibling = None def compute(self, EV): try: self.val = Retire(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Retire zero division") desc = """ Average number of Uops retired in cycles where at least one uop has retired.""" class Metric_IpAssist: name = "IpAssist" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = IpAssist(self, EV, 0) self.thresh = (self.val < 100000) except ZeroDivisionError: handle_error_metric(self, "IpAssist zero division") desc = """ Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)""" class Metric_Execute: name = "Execute" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT']) sibling = None def compute(self, EV): try: self.val = Execute(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Execute zero division") desc = """ """ class Metric_Fetch_DSB: name = "Fetch_DSB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_DSB(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_DSB zero division") desc = """ Average number of uops fetched from DSB per cycle""" class Metric_Fetch_MITE: name = "Fetch_MITE" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_MITE(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_MITE zero division") desc = """ Average number of uops fetched from MITE per cycle""" class 
Metric_Fetch_UpC: name = "Fetch_UpC" domain = "Metric" maxval = 6.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_UpC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_UpC zero division") desc = """ Average number of Uops issued by front-end when it issued something""" class Metric_DSB_Coverage: name = "DSB_Coverage" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSB', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Coverage(self, EV, 0) self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1) except ZeroDivisionError: handle_error_metric(self, "DSB_Coverage zero division") desc = """ Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture- and-technology/64-ia-32-architectures-optimization- manual.html""" class Metric_DSB_Switch_Cost: name = "DSB_Switch_Cost" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss']) sibling = None def compute(self, EV): try: self.val = DSB_Switch_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DSB_Switch_Cost zero division") desc = """ Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.""" class Metric_DSB_Misses: name = "DSB_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = DSB_Misses(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "DSB_Misses zero division") desc = """ Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.""" class 
Metric_DSB_Bandwidth: name = "DSB_Bandwidth" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['DSB', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Bandwidth(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "DSB_Bandwidth zero division") desc = """ Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck.""" class Metric_ICache_Miss_Latency: name = "ICache_Miss_Latency" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = ICache_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ICache_Miss_Latency zero division") desc = """ Average Latency for L1 instruction cache misses""" class Metric_IC_Misses: name = "IC_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = IC_Misses(self, EV, 0) self.thresh = (self.val > 5) except ZeroDivisionError: handle_error_metric(self, "IC_Misses zero division") desc = """ Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.""" class Metric_IpDSB_Miss_Ret: name = "IpDSB_Miss_Ret" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = IpDSB_Miss_Ret(self, EV, 0) self.thresh = (self.val < 50) except ZeroDivisionError: handle_error_metric(self, "IpDSB_Miss_Ret zero division") desc = """ Instructions per non-speculative DSB miss (lower number means higher occurrence rate)""" class Metric_IpUnknown_Branch: name = "IpUnknown_Branch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed']) sibling = None def 
compute(self, EV): try: self.val = IpUnknown_Branch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpUnknown_Branch zero division") desc = """ Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)""" class Metric_L2MPKI_Code: name = "L2MPKI_Code" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code zero division") desc = """ L2 cache true code cacheline misses per kilo instruction""" class Metric_L2MPKI_Code_All: name = "L2MPKI_Code_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code_All zero division") desc = """ L2 cache speculative code cacheline misses per kilo instruction""" class Metric_IpMispredict: name = "IpMispredict" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)""" class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = (self.val < 1000) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc 
= """ Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).""" class Metric_Branch_Misprediction_Cost: name = "Branch_Misprediction_Cost" domain = "Core_Metric" maxval = 300 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Branch_Misprediction_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Misprediction_Cost zero division") desc = """ Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)""" class Metric_Spec_Clears_Ratio: name = "Spec_Clears_Ratio" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Spec_Clears_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Spec_Clears_Ratio zero division") desc = """ Speculative to Retired ratio of all clears (covering Mispredicts and nukes)""" class Metric_Cond_NT: name = "Cond_NT" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_NT(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cond_NT zero division") desc = """ Fraction of branches that are non-taken conditionals""" class Metric_Cond_TK: name = "Cond_TK" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_TK(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cond_TK zero division") desc = """ Fraction of branches that are taken conditionals""" class Metric_CallRet: name = "CallRet" domain = "Fraction" maxval = 1.0 
errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = CallRet(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CallRet zero division") desc = """ Fraction of branches that are CALL or RET""" class Metric_Jump: name = "Jump" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = Jump(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Jump zero division") desc = """ Fraction of branches that are unconditional (direct or indirect) jumps""" class Metric_Load_Miss_Real_Latency: name = "Load_Miss_Real_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat']) sibling = None def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Miss_Real_Latency zero division") desc = """ Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)""" class Metric_MLP: name = "MLP" domain = "Metric" maxval = 10.0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MLP zero division") desc = """ Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor)""" class Metric_L1MPKI: name = "L1MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI zero division") desc = """ L1 cache true misses per kilo instruction for retired demand loads""" class Metric_L1MPKI_Load: name = "L1MPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI_Load zero division") desc = """ L1 cache true misses per kilo instruction for all demand loads (including speculative)""" class Metric_L2MPKI: name = "L2MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'Backend', 'CacheHits']) sibling = None def compute(self, EV): try: self.val = L2MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI zero division") desc = """ L2 cache true misses per kilo instruction for retired demand loads""" class Metric_L2MPKI_All: name = "L2MPKI_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_All zero division") desc = """ L2 cache misses per kilo instruction for all request types (including speculative)""" class Metric_L2MPKI_Load: name = "L2MPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: 
handle_error_metric(self, "L2MPKI_Load zero division") desc = """ L2 cache misses per kilo instruction for all demand loads (including speculative)""" class Metric_L2MPKI_RFO: name = "L2MPKI_RFO" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheMisses', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_RFO(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_RFO zero division") desc = """ Offcore requests (L2 cache miss) per kilo instruction for demand RFOs""" class Metric_L2HPKI_All: name = "L2HPKI_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2HPKI_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2HPKI_All zero division") desc = """ L2 cache hits per kilo instruction for all request types (including speculative)""" class Metric_L2HPKI_Load: name = "L2HPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2HPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2HPKI_Load zero division") desc = """ L2 cache hits per kilo instruction for all demand loads (including speculative)""" class Metric_L3MPKI: name = "L3MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = L3MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3MPKI zero division") desc = """ L3 cache true misses per kilo instruction for retired demand loads""" class Metric_FB_HPKI: name = "FB_HPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): 
try: self.val = FB_HPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FB_HPKI zero division") desc = """ Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss- handling entries)""" class Metric_L1D_Cache_Fill_BW: name = "L1D_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW zero division") desc = """ """ class Metric_L2_Cache_Fill_BW: name = "L2_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Fill_BW: name = "L3_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Access_BW: name = "L3_Cache_Access_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW zero division") desc = """ """ class Metric_Page_Walks_Utilization: name = "Page_Walks_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, 
EV): try: self.val = Page_Walks_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Page_Walks_Utilization zero division") desc = """ Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses""" class Metric_Code_STLB_MPKI: name = "Code_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Fed', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Code_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Code_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Load_STLB_MPKI: name = "Load_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Load_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Store_STLB_MPKI: name = "Store_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Store_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Store_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_L1D_Cache_Fill_BW_2T: name = "L1D_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: 
self.val = L1D_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L1 data cache [GB / sec]""" class Metric_L2_Cache_Fill_BW_2T: name = "L2_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L2 cache [GB / sec]""" class Metric_L3_Cache_Fill_BW_2T: name = "L3_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L3 cache [GB / sec]""" class Metric_L3_Cache_Access_BW_2T: name = "L3_Cache_Access_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW_2T zero division") desc = """ Average per-core data access bandwidth to the L3 cache [GB / sec]""" class Metric_L2_Evictions_Silent_PKI: name = "L2_Evictions_Silent_PKI" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['L2Evicts', 'Mem', 'Server']) sibling = None def compute(self, EV): try: self.val = L2_Evictions_Silent_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, 
"L2_Evictions_Silent_PKI zero division") desc = """ Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)""" class Metric_L2_Evictions_NonSilent_PKI: name = "L2_Evictions_NonSilent_PKI" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['L2Evicts', 'Mem', 'Server']) sibling = None def compute(self, EV): try: self.val = L2_Evictions_NonSilent_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Evictions_NonSilent_PKI zero division") desc = """ Rate of non silent evictions from the L2 cache per Kilo instruction""" class Metric_Load_L2_Miss_Latency: name = "Load_L2_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_Miss_Latency zero division") desc = """ Average Latency for L2 cache miss demand Loads""" class Metric_Load_L2_MLP: name = "Load_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_MLP zero division") desc = """ Average Parallel L2 cache miss demand Loads""" class Metric_Data_L2_MLP: name = "Data_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Data_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Data_L2_MLP zero division") desc = """ Average Parallel L2 cache miss data reads""" class Metric_UC_Load_PKI: name = "UC_Load_PKI" 
domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = UC_Load_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "UC_Load_PKI zero division") desc = """ Un-cacheable retired load per kilo instruction""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "Metric" maxval = 1 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'Summary']) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization (percentage)""" class Metric_CPUs_Utilized: name = "CPUs_Utilized" domain = "Metric" maxval = 300 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = CPUs_Utilized(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPUs_Utilized zero division") desc = """ Average number of utilized CPUs""" class Metric_Core_Frequency: name = "Core_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary', 'Power']) sibling = None def compute(self, EV): try: self.val = Core_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Core_Frequency zero division") desc = """ Measured Average Core Frequency for unhalted processors [GHz]""" class Metric_Uncore_Frequency: name = "Uncore_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Uncore_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Uncore_Frequency zero division") desc = """ Measured Average Uncore Frequency for the SoC [GHz]""" class Metric_GFLOPs: name = 
"GFLOPs" domain = "Metric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Cor', 'Flops', 'HPC']) sibling = None def compute(self, EV): try: self.val = GFLOPs(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "GFLOPs zero division") desc = """ Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_Power_License0_Utilization: name = "Power_License0_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Power_License0_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Power_License0_Utilization zero division") desc = """ Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.""" class Metric_Power_License1_Utilization: name = "Power_License1_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Power_License1_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Power_License1_Utilization zero division") desc = """ Fraction of Core cycles where the core was running with power-delivery for license level 1. 
This includes high current AVX 256-bit instructions as well as
low current AVX 512-bit instructions."""


# NOTE(review): the classes below are auto-generated metric descriptors.
# Each pairs a formula function (defined earlier in this file) with
# display metadata: `domain` (units), `maxval` (clamp for display, 0 =
# unclamped), `area` (output grouping) and `metricgroup` tags.  compute()
# evaluates the formula over the event values in EV and sets `thresh`
# when the value is considered noteworthy; a ZeroDivisionError (missing
# or zero event counts) is routed to handle_error_metric, which zeroes
# the value and bumps errcount.

# Fraction of core cycles spent at power-delivery license level 2.
class Metric_Power_License2_Utilization:
    name = "Power_License2_Utilization"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Power'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Power_License2_Utilization(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Power_License2_Utilization zero division")
    desc = """
Fraction of Core cycles where the core was running with
power-delivery for license level 2 (introduced in SKX).  This
includes high current AVX 512-bit instructions."""


# Fraction of cycles with both SMT threads active on the core.
class Metric_SMT_2T_Utilization:
    name = "SMT_2T_Utilization"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['SMT'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = SMT_2T_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SMT_2T_Utilization zero division")
    desc = """
Fraction of cycles where both hardware Logical Processors were
active"""


# Fraction of cycles spent in OS kernel mode; flagged above 5%.
class Metric_Kernel_Utilization:
    name = "Kernel_Utilization"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['OS'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Kernel_Utilization(self, EV, 0)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_Utilization zero division")
    desc = """
Fraction of cycles spent in the Operating System (OS) Kernel
mode"""


# Cycles-per-instruction measured over kernel-mode execution only.
class Metric_Kernel_CPI:
    name = "Kernel_CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['OS'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Kernel_CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_CPI zero division")
    desc = """
Cycles Per Instruction for the Operating System (OS) Kernel
mode"""


# External memory read+write bandwidth in GB/s (display-clamped at 200).
class Metric_DRAM_BW_Use:
    name = "DRAM_BW_Use"
    domain = "GB/sec"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = DRAM_BW_Use(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "DRAM_BW_Use zero division")
    desc = """
Average external Memory Bandwidth Use for reads and writes [GB /
sec]"""


# Average external-memory read latency in nanoseconds.
class Metric_MEM_Read_Latency:
    name = "MEM_Read_Latency"
    domain = "NanoSeconds"
    maxval = 1000
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Mem', 'MemoryLat', 'SoC'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = MEM_Read_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_Read_Latency zero division")
    desc = """
Average latency of data read request to external memory (in
nanoseconds). Accounts for demand loads and L1/L2 prefetches."""


# Average number of outstanding (parallel) external-memory reads.
class Metric_MEM_Parallel_Reads:
    name = "MEM_Parallel_Reads"
    domain = "SystemMetric"
    maxval = 100
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Mem', 'MemoryBW', 'SoC'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = MEM_Parallel_Reads(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_Parallel_Reads zero division")
    desc = """
Average number of parallel data read requests to external memory.
Accounts for demand loads and L1/L2 prefetches"""


# Average read latency to 3D X-Point (PMM) memory; server parts only.
class Metric_MEM_PMM_Read_Latency:
    name = "MEM_PMM_Read_Latency"
    domain = "NanoSeconds"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['MemOffcore', 'MemoryLat', 'SoC', 'Server'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = MEM_PMM_Read_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_PMM_Read_Latency zero division")
    desc = """
Average latency of data read request to external 3D X-Point
memory [in nanoseconds]. Accounts for demand loads and L1/L2
data-read prefetches"""


# Average read latency to external DRAM; server parts only.
class Metric_MEM_DRAM_Read_Latency:
    name = "MEM_DRAM_Read_Latency"
    domain = "NanoSeconds"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['MemOffcore', 'MemoryLat', 'SoC', 'Server'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = MEM_DRAM_Read_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_DRAM_Read_Latency zero division")
    desc = """
Average latency of data read request to external DRAM memory [in
nanoseconds]. Accounts for demand loads and L1/L2 data-
read prefetches"""


# 3D X-Point read bandwidth in GB/s.
class Metric_PMM_Read_BW:
    name = "PMM_Read_BW"
    domain = "GB/sec"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['MemOffcore', 'MemoryBW', 'SoC', 'Server'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = PMM_Read_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "PMM_Read_BW zero division")
    desc = """
Average 3DXP Memory Bandwidth Use for reads [GB / sec]"""


# 3D X-Point write bandwidth in GB/s.
class Metric_PMM_Write_BW:
    name = "PMM_Write_BW"
    domain = "GB/sec"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['MemOffcore', 'MemoryBW', 'SoC', 'Server'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = PMM_Write_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "PMM_Write_BW zero division")
    desc = """
Average 3DXP Memory Bandwidth Use for Writes [GB / sec]"""


# IO (DMA) read bandwidth: device-initiated reads of CPU memory.
class Metric_IO_Read_BW:
    name = "IO_Read_BW"
    domain = "GB/sec"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['IoBW', 'MemOffcore', 'SoC', 'Server'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = IO_Read_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IO_Read_BW zero division")
    desc = """
Average IO (network or disk) Bandwidth Use for Reads [GB / sec].
Bandwidth of IO reads that are initiated by end device
controllers that are requesting memory from the CPU"""


# IO (DMA) write bandwidth: device-initiated writes to CPU memory.
class Metric_IO_Write_BW:
    name = "IO_Write_BW"
    domain = "GB/sec"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['IoBW', 'MemOffcore', 'SoC', 'Server'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = IO_Write_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IO_Write_BW zero division")
    desc = """
Average IO (network or disk) Bandwidth Use for Writes [GB / sec].
Bandwidth of IO writes that are initiated by end device
controllers that are writing memory to the CPU"""


# Measurement duration; flagged when shorter than one second
# (too short for stable multiplexed-counter statistics).
class Metric_Time:
    name = "Time"
    domain = "Seconds"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Time(self, EV, 0)
            self.thresh = (self.val < 1)
        except ZeroDivisionError:
            handle_error_metric(self, "Time zero division")
    desc = """
Run duration time in seconds"""


# Uncore clock ticks while any core on the socket is active.
class Metric_Socket_CLKS:
    name = "Socket_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['SoC'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = Socket_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Socket_CLKS zero division")
    desc = """
Socket actual clocks when any core is active on that socket"""


# Instructions per far branch; a low value means frequent
# OS transitions / interrupts / exceptions.
class Metric_IpFarBranch:
    name = "IpFarBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Branches', 'OS'])
    sibling = None

    def compute(self, EV):
        try:
            self.val = IpFarBranch(self, EV, 0)
            self.thresh = (self.val < 1000000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFarBranch zero division")
    desc = """
Instructions per Far Branch ( Far Branches apply upon transition
from application to operating system, handling interrupts,
exceptions) [lower number means higher occurrence rate]"""

# Schedule

class Setup:
    def __init__(self, r):
        # Instantiate every tree node and metric, register each with the
        # runner `r`, and (below) wire up parent links and the
        # cross-references the formulas need.  `o` maps node name -> node.
        o = dict()
n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n n = LCP() ; r.run(n) ; o["LCP"] = n n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n n = MITE() ; r.run(n) ; o["MITE"] = n n = Decoder0_Alone() ; r.run(n) ; o["Decoder0_Alone"] = n n = DSB() ; r.run(n) ; o["DSB"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Other_Mispredicts() ; r.run(n) ; o["Other_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Other_Nukes() ; r.run(n) ; o["Other_Nukes"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n n = Load_STLB_Hit() ; r.run(n) ; o["Load_STLB_Hit"] = n n = Load_STLB_Miss() ; r.run(n) ; o["Load_STLB_Miss"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = L1_Hit_Latency() ; r.run(n) ; o["L1_Hit_Latency"] = n n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n n = FB_Full() ; r.run(n) ; o["FB_Full"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n n = L3_Hit_Latency() ; r.run(n) ; 
o["L3_Hit_Latency"] = n n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n n = Local_MEM() ; r.run(n) ; o["Local_MEM"] = n n = Remote_MEM() ; r.run(n) ; o["Remote_MEM"] = n n = Remote_Cache() ; r.run(n) ; o["Remote_Cache"] = n n = PMM_Bound() ; r.run(n) ; o["PMM_Bound"] = n n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n n = Store_STLB_Hit() ; r.run(n) ; o["Store_STLB_Hit"] = n n = Store_STLB_Miss() ; r.run(n) ; o["Store_STLB_Miss"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Divider() ; r.run(n) ; o["Divider"] = n n = Serializing_Operation() ; r.run(n) ; o["Serializing_Operation"] = n n = Slow_Pause() ; r.run(n) ; o["Slow_Pause"] = n n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n n = Mixing_Vectors() ; r.run(n) ; o["Mixing_Vectors"] = n n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n n = Port_0() ; r.run(n) ; o["Port_0"] = n n = Port_1() ; r.run(n) ; o["Port_1"] = n n = Port_5() ; r.run(n) ; o["Port_5"] = n n = Port_6() ; r.run(n) ; o["Port_6"] = n n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n n = Port_2() ; r.run(n) ; o["Port_2"] = n n = Port_3() ; r.run(n) ; o["Port_3"] = n n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n n = Port_4() ; r.run(n) ; o["Port_4"] = n n = Port_7() ; r.run(n) ; o["Port_7"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = 
Light_Operations() ; r.run(n) ; o["Light_Operations"] = n n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n n = X87_Use() ; r.run(n) ; o["X87_Use"] = n n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n n = FP_Vector_512b() ; r.run(n) ; o["FP_Vector_512b"] = n n = Memory_Operations() ; r.run(n) ; o["Memory_Operations"] = n n = Fused_Instructions() ; r.run(n) ; o["Fused_Instructions"] = n n = Non_Fused_Branches() ; r.run(n) ; o["Non_Fused_Branches"] = n n = Other_Light_Ops() ; r.run(n) ; o["Other_Light_Ops"] = n n = Nop_Instructions() ; r.run(n) ; o["Nop_Instructions"] = n n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n n = Few_Uops_Instructions() ; r.run(n) ; o["Few_Uops_Instructions"] = n n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n n = Assists() ; r.run(n) ; o["Assists"] = n n = FP_Assists() ; r.run(n) ; o["FP_Assists"] = n n = CISC() ; r.run(n) ; o["CISC"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Resteers"].parent = o["Fetch_Latency"] o["Mispredicts_Resteers"].parent = o["Branch_Resteers"] o["Clears_Resteers"].parent = o["Branch_Resteers"] o["Unknown_Branches"].parent = o["Branch_Resteers"] o["MS_Switches"].parent = o["Fetch_Latency"] o["LCP"].parent = o["Fetch_Latency"] o["DSB_Switches"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["MITE"].parent = o["Fetch_Bandwidth"] o["Decoder0_Alone"].parent = o["MITE"] o["DSB"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Other_Mispredicts"].parent = o["Branch_Mispredicts"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Other_Nukes"].parent = o["Machine_Clears"] o["Memory_Bound"].parent = o["Backend_Bound"] o["L1_Bound"].parent = 
o["Memory_Bound"] o["DTLB_Load"].parent = o["L1_Bound"] o["Load_STLB_Hit"].parent = o["DTLB_Load"] o["Load_STLB_Miss"].parent = o["DTLB_Load"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["L1_Hit_Latency"].parent = o["L1_Bound"] o["Lock_Latency"].parent = o["L1_Bound"] o["Split_Loads"].parent = o["L1_Bound"] o["G4K_Aliasing"].parent = o["L1_Bound"] o["FB_Full"].parent = o["L1_Bound"] o["L2_Bound"].parent = o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["Contested_Accesses"].parent = o["L3_Bound"] o["Data_Sharing"].parent = o["L3_Bound"] o["L3_Hit_Latency"].parent = o["L3_Bound"] o["SQ_Full"].parent = o["L3_Bound"] o["DRAM_Bound"].parent = o["Memory_Bound"] o["MEM_Bandwidth"].parent = o["DRAM_Bound"] o["MEM_Latency"].parent = o["DRAM_Bound"] o["Local_MEM"].parent = o["MEM_Latency"] o["Remote_MEM"].parent = o["MEM_Latency"] o["Remote_Cache"].parent = o["MEM_Latency"] o["PMM_Bound"].parent = o["Memory_Bound"] o["Store_Bound"].parent = o["Memory_Bound"] o["Store_Latency"].parent = o["Store_Bound"] o["False_Sharing"].parent = o["Store_Bound"] o["Split_Stores"].parent = o["Store_Bound"] o["DTLB_Store"].parent = o["Store_Bound"] o["Store_STLB_Hit"].parent = o["DTLB_Store"] o["Store_STLB_Miss"].parent = o["DTLB_Store"] o["Core_Bound"].parent = o["Backend_Bound"] o["Divider"].parent = o["Core_Bound"] o["Serializing_Operation"].parent = o["Core_Bound"] o["Slow_Pause"].parent = o["Serializing_Operation"] o["Ports_Utilization"].parent = o["Core_Bound"] o["Ports_Utilized_0"].parent = o["Ports_Utilization"] o["Mixing_Vectors"].parent = o["Ports_Utilized_0"] o["Ports_Utilized_1"].parent = o["Ports_Utilization"] o["Ports_Utilized_2"].parent = o["Ports_Utilization"] o["Ports_Utilized_3m"].parent = o["Ports_Utilization"] o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_0"].parent = o["ALU_Op_Utilization"] o["Port_1"].parent = o["ALU_Op_Utilization"] o["Port_5"].parent = o["ALU_Op_Utilization"] o["Port_6"].parent = o["ALU_Op_Utilization"] 
o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_2"].parent = o["Load_Op_Utilization"] o["Port_3"].parent = o["Load_Op_Utilization"] o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_4"].parent = o["Store_Op_Utilization"] o["Port_7"].parent = o["Store_Op_Utilization"] o["Light_Operations"].parent = o["Retiring"] o["FP_Arith"].parent = o["Light_Operations"] o["X87_Use"].parent = o["FP_Arith"] o["FP_Scalar"].parent = o["FP_Arith"] o["FP_Vector"].parent = o["FP_Arith"] o["FP_Vector_128b"].parent = o["FP_Vector"] o["FP_Vector_256b"].parent = o["FP_Vector"] o["FP_Vector_512b"].parent = o["FP_Vector"] o["Memory_Operations"].parent = o["Light_Operations"] o["Fused_Instructions"].parent = o["Light_Operations"] o["Non_Fused_Branches"].parent = o["Light_Operations"] o["Other_Light_Ops"].parent = o["Light_Operations"] o["Nop_Instructions"].parent = o["Other_Light_Ops"] o["Heavy_Operations"].parent = o["Retiring"] o["Few_Uops_Instructions"].parent = o["Heavy_Operations"] o["Microcode_Sequencer"].parent = o["Heavy_Operations"] o["Assists"].parent = o["Microcode_Sequencer"] o["FP_Assists"].parent = o["Assists"] o["CISC"].parent = o["Microcode_Sequencer"] # user visible metrics n = Metric_Mispredictions() ; r.metric(n) ; o["Mispredictions"] = n n = Metric_Big_Code() ; r.metric(n) ; o["Big_Code"] = n n = Metric_Instruction_Fetch_BW() ; r.metric(n) ; o["Instruction_Fetch_BW"] = n n = Metric_Cache_Memory_Bandwidth() ; r.metric(n) ; o["Cache_Memory_Bandwidth"] = n n = Metric_Cache_Memory_Latency() ; r.metric(n) ; o["Cache_Memory_Latency"] = n n = Metric_Memory_Data_TLBs() ; r.metric(n) ; o["Memory_Data_TLBs"] = n n = Metric_Memory_Synchronization() ; r.metric(n) ; o["Memory_Synchronization"] = n n = Metric_Compute_Bound_Est() ; r.metric(n) ; o["Compute_Bound_Est"] = n n = Metric_Irregular_Overhead() ; r.metric(n) ; o["Irregular_Overhead"] = n n = Metric_Other_Bottlenecks() ; r.metric(n) ; o["Other_Bottlenecks"] = n n = Metric_Branching_Overhead() ; 
r.metric(n) ; o["Branching_Overhead"] = n n = Metric_Useful_Work() ; r.metric(n) ; o["Useful_Work"] = n n = Metric_Core_Bound_Likely() ; r.metric(n) ; o["Core_Bound_Likely"] = n n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n n = Metric_EPC() ; r.metric(n) ; o["EPC"] = n n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n n = Metric_IpArith_AVX512() ; r.metric(n) ; o["IpArith_AVX512"] = n n = Metric_IpPause() ; r.metric(n) ; o["IpPause"] = n n = Metric_IpSWPF() ; r.metric(n) ; o["IpSWPF"] = n n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n n = Metric_IpAssist() ; r.metric(n) ; o["IpAssist"] = n n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n n = 
Metric_Fetch_DSB() ; r.metric(n) ; o["Fetch_DSB"] = n n = Metric_Fetch_MITE() ; r.metric(n) ; o["Fetch_MITE"] = n n = Metric_Fetch_UpC() ; r.metric(n) ; o["Fetch_UpC"] = n n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n n = Metric_DSB_Switch_Cost() ; r.metric(n) ; o["DSB_Switch_Cost"] = n n = Metric_DSB_Misses() ; r.metric(n) ; o["DSB_Misses"] = n n = Metric_DSB_Bandwidth() ; r.metric(n) ; o["DSB_Bandwidth"] = n n = Metric_ICache_Miss_Latency() ; r.metric(n) ; o["ICache_Miss_Latency"] = n n = Metric_IC_Misses() ; r.metric(n) ; o["IC_Misses"] = n n = Metric_IpDSB_Miss_Ret() ; r.metric(n) ; o["IpDSB_Miss_Ret"] = n n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n n = Metric_L2MPKI_Code() ; r.metric(n) ; o["L2MPKI_Code"] = n n = Metric_L2MPKI_Code_All() ; r.metric(n) ; o["L2MPKI_Code_All"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_Branch_Misprediction_Cost() ; r.metric(n) ; o["Branch_Misprediction_Cost"] = n n = Metric_Spec_Clears_Ratio() ; r.metric(n) ; o["Spec_Clears_Ratio"] = n n = Metric_Cond_NT() ; r.metric(n) ; o["Cond_NT"] = n n = Metric_Cond_TK() ; r.metric(n) ; o["Cond_TK"] = n n = Metric_CallRet() ; r.metric(n) ; o["CallRet"] = n n = Metric_Jump() ; r.metric(n) ; o["Jump"] = n n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n n = Metric_L1MPKI_Load() ; r.metric(n) ; o["L1MPKI_Load"] = n n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n n = Metric_L2HPKI_All() ; r.metric(n) ; o["L2HPKI_All"] = n n = Metric_L2HPKI_Load() ; r.metric(n) ; o["L2HPKI_Load"] = n n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n 
n = Metric_FB_HPKI() ; r.metric(n) ; o["FB_HPKI"] = n n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n n = Metric_L3_Cache_Access_BW() ; r.metric(n) ; o["L3_Cache_Access_BW"] = n n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n n = Metric_Code_STLB_MPKI() ; r.metric(n) ; o["Code_STLB_MPKI"] = n n = Metric_Load_STLB_MPKI() ; r.metric(n) ; o["Load_STLB_MPKI"] = n n = Metric_Store_STLB_MPKI() ; r.metric(n) ; o["Store_STLB_MPKI"] = n n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Access_BW_2T() ; r.metric(n) ; o["L3_Cache_Access_BW_2T"] = n n = Metric_L2_Evictions_Silent_PKI() ; r.metric(n) ; o["L2_Evictions_Silent_PKI"] = n n = Metric_L2_Evictions_NonSilent_PKI() ; r.metric(n) ; o["L2_Evictions_NonSilent_PKI"] = n n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n n = Metric_UC_Load_PKI() ; r.metric(n) ; o["UC_Load_PKI"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n n = Metric_Uncore_Frequency() ; r.metric(n) ; o["Uncore_Frequency"] = n n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_Power_License0_Utilization() ; r.metric(n) ; o["Power_License0_Utilization"] = n n = Metric_Power_License1_Utilization() ; r.metric(n) ; o["Power_License1_Utilization"] = n n = 
Metric_Power_License2_Utilization() ; r.metric(n) ; o["Power_License2_Utilization"] = n n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n n = Metric_MEM_PMM_Read_Latency() ; r.metric(n) ; o["MEM_PMM_Read_Latency"] = n n = Metric_MEM_DRAM_Read_Latency() ; r.metric(n) ; o["MEM_DRAM_Read_Latency"] = n n = Metric_PMM_Read_BW() ; r.metric(n) ; o["PMM_Read_BW"] = n n = Metric_PMM_Write_BW() ; r.metric(n) ; o["PMM_Write_BW"] = n n = Metric_IO_Read_BW() ; r.metric(n) ; o["IO_Read_BW"] = n n = Metric_IO_Write_BW() ; r.metric(n) ; o["IO_Write_BW"] = n n = Metric_Time() ; r.metric(n) ; o["Time"] = n n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n # references between groups o["Branch_Resteers"].Unknown_Branches = o["Unknown_Branches"] o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Other_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Other_Mispredicts"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Nukes"].Machine_Clears = o["Machine_Clears"] o["Other_Nukes"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Nukes"].Bad_Speculation = o["Bad_Speculation"] o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Retiring = o["Retiring"] o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Backend_Bound = o["Backend_Bound"] 
o["Load_STLB_Hit"].Load_STLB_Miss = o["Load_STLB_Miss"] o["Load_STLB_Hit"].DTLB_Load = o["DTLB_Load"] o["DRAM_Bound"].PMM_Bound = o["PMM_Bound"] o["DRAM_Bound"].L2_Bound = o["L2_Bound"] o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["PMM_Bound"].L2_Bound = o["L2_Bound"] o["Store_STLB_Hit"].DTLB_Store = o["DTLB_Store"] o["Store_STLB_Hit"].Store_STLB_Miss = o["Store_STLB_Miss"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Core_Bound"].Retiring = o["Retiring"] o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Ports_Utilization"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Ports_Utilization"].Retiring = o["Retiring"] o["Retiring"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Retiring = o["Retiring"] o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"] o["FP_Arith"].Retiring = o["Retiring"] o["FP_Arith"].FP_Scalar = o["FP_Scalar"] o["FP_Arith"].X87_Use = o["X87_Use"] o["FP_Arith"].FP_Vector = o["FP_Vector"] o["X87_Use"].Retiring = o["Retiring"] o["Memory_Operations"].Retiring = o["Retiring"] o["Memory_Operations"].Light_Operations = o["Light_Operations"] o["Memory_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Fused_Instructions"].Retiring = o["Retiring"] o["Fused_Instructions"].Light_Operations = o["Light_Operations"] o["Fused_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Non_Fused_Branches"].Retiring = o["Retiring"] o["Non_Fused_Branches"].Light_Operations = o["Light_Operations"] o["Non_Fused_Branches"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].Light_Operations = o["Light_Operations"] o["Other_Light_Ops"].Retiring = o["Retiring"] o["Other_Light_Ops"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].Fused_Instructions = o["Fused_Instructions"] o["Other_Light_Ops"].Non_Fused_Branches = o["Non_Fused_Branches"] o["Other_Light_Ops"].FP_Vector = o["FP_Vector"] o["Other_Light_Ops"].FP_Scalar = o["FP_Scalar"] 
o["Other_Light_Ops"].FP_Arith = o["FP_Arith"] o["Other_Light_Ops"].X87_Use = o["X87_Use"] o["Other_Light_Ops"].Memory_Operations = o["Memory_Operations"] o["Nop_Instructions"].Retiring = o["Retiring"] o["Nop_Instructions"].Light_Operations = o["Light_Operations"] o["Nop_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Assists = o["Assists"] o["Mispredictions"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Mispredictions"].LCP = o["LCP"] o["Mispredictions"].Other_Mispredicts = o["Other_Mispredicts"] o["Mispredictions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Mispredictions"].DSB_Switches = o["DSB_Switches"] o["Mispredictions"].Branch_Resteers = o["Branch_Resteers"] o["Mispredictions"].ICache_Misses = o["ICache_Misses"] o["Mispredictions"].MS_Switches = o["MS_Switches"] o["Mispredictions"].Bad_Speculation = o["Bad_Speculation"] o["Mispredictions"].ITLB_Misses = o["ITLB_Misses"] o["Mispredictions"].Unknown_Branches = o["Unknown_Branches"] o["Mispredictions"].Fetch_Latency = o["Fetch_Latency"] o["Mispredictions"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Big_Code"].LCP = o["LCP"] o["Big_Code"].ICache_Misses = o["ICache_Misses"] o["Big_Code"].DSB_Switches = o["DSB_Switches"] o["Big_Code"].Branch_Resteers = o["Branch_Resteers"] o["Big_Code"].MS_Switches = o["MS_Switches"] o["Big_Code"].ITLB_Misses = o["ITLB_Misses"] o["Big_Code"].Unknown_Branches = o["Unknown_Branches"] o["Big_Code"].Fetch_Latency = o["Fetch_Latency"] o["Instruction_Fetch_BW"].Heavy_Operations = o["Heavy_Operations"] o["Instruction_Fetch_BW"].ICache_Misses = o["ICache_Misses"] o["Instruction_Fetch_BW"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Instruction_Fetch_BW"].Frontend_Bound = o["Frontend_Bound"] o["Instruction_Fetch_BW"].Bad_Speculation = 
o["Bad_Speculation"] o["Instruction_Fetch_BW"].ITLB_Misses = o["ITLB_Misses"] o["Instruction_Fetch_BW"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Instruction_Fetch_BW"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Instruction_Fetch_BW"].LCP = o["LCP"] o["Instruction_Fetch_BW"].Other_Mispredicts = o["Other_Mispredicts"] o["Instruction_Fetch_BW"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Instruction_Fetch_BW"].DSB_Switches = o["DSB_Switches"] o["Instruction_Fetch_BW"].Assists = o["Assists"] o["Instruction_Fetch_BW"].Branch_Resteers = o["Branch_Resteers"] o["Instruction_Fetch_BW"].Clears_Resteers = o["Clears_Resteers"] o["Instruction_Fetch_BW"].MS_Switches = o["MS_Switches"] o["Instruction_Fetch_BW"].Unknown_Branches = o["Unknown_Branches"] o["Instruction_Fetch_BW"].Fetch_Latency = o["Fetch_Latency"] o["Cache_Memory_Bandwidth"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Bandwidth"].G4K_Aliasing = o["G4K_Aliasing"] o["Cache_Memory_Bandwidth"].Retiring = o["Retiring"] o["Cache_Memory_Bandwidth"].PMM_Bound = o["PMM_Bound"] o["Cache_Memory_Bandwidth"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Bandwidth"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Bandwidth"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Bandwidth"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Bandwidth"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Bandwidth"].Backend_Bound = o["Backend_Bound"] o["Cache_Memory_Bandwidth"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Bandwidth"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Cache_Memory_Bandwidth"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Bandwidth"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Bandwidth"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Bandwidth"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Bandwidth"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Bandwidth"].FB_Full = o["FB_Full"] o["Cache_Memory_Bandwidth"].Store_Fwd_Blk = 
o["Store_Fwd_Blk"] o["Cache_Memory_Bandwidth"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Bandwidth"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Bandwidth"].DRAM_Bound = o["DRAM_Bound"] o["Cache_Memory_Latency"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Latency"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Latency"].False_Sharing = o["False_Sharing"] o["Cache_Memory_Latency"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Latency"].Retiring = o["Retiring"] o["Cache_Memory_Latency"].PMM_Bound = o["PMM_Bound"] o["Cache_Memory_Latency"].Frontend_Bound = o["Frontend_Bound"] o["Cache_Memory_Latency"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Latency"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Latency"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Latency"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Latency"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Latency"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Latency"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Latency"].FB_Full = o["FB_Full"] o["Cache_Memory_Latency"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Latency"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Cache_Memory_Latency"].DTLB_Store = o["DTLB_Store"] o["Cache_Memory_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Latency"].Store_Latency = o["Store_Latency"] o["Cache_Memory_Latency"].Split_Stores = o["Split_Stores"] o["Cache_Memory_Latency"].G4K_Aliasing = o["G4K_Aliasing"] o["Cache_Memory_Latency"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Latency"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Latency"].Backend_Bound = o["Backend_Bound"] o["Cache_Memory_Latency"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Latency"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Data_TLBs"].L1_Bound = o["L1_Bound"] o["Memory_Data_TLBs"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Memory_Data_TLBs"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Memory_Data_TLBs"].DTLB_Load = o["DTLB_Load"] o["Memory_Data_TLBs"].Store_Latency = 
o["Store_Latency"] o["Memory_Data_TLBs"].G4K_Aliasing = o["G4K_Aliasing"] o["Memory_Data_TLBs"].Retiring = o["Retiring"] o["Memory_Data_TLBs"].Split_Stores = o["Split_Stores"] o["Memory_Data_TLBs"].PMM_Bound = o["PMM_Bound"] o["Memory_Data_TLBs"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Data_TLBs"].DTLB_Store = o["DTLB_Store"] o["Memory_Data_TLBs"].L2_Bound = o["L2_Bound"] o["Memory_Data_TLBs"].Memory_Bound = o["Memory_Bound"] o["Memory_Data_TLBs"].Lock_Latency = o["Lock_Latency"] o["Memory_Data_TLBs"].Backend_Bound = o["Backend_Bound"] o["Memory_Data_TLBs"].Store_Bound = o["Store_Bound"] o["Memory_Data_TLBs"].False_Sharing = o["False_Sharing"] o["Memory_Data_TLBs"].Split_Loads = o["Split_Loads"] o["Memory_Data_TLBs"].L3_Bound = o["L3_Bound"] o["Memory_Data_TLBs"].FB_Full = o["FB_Full"] o["Memory_Data_TLBs"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Synchronization"].L1_Bound = o["L1_Bound"] o["Memory_Synchronization"].Retiring = o["Retiring"] o["Memory_Synchronization"].PMM_Bound = o["PMM_Bound"] o["Memory_Synchronization"].Data_Sharing = o["Data_Sharing"] o["Memory_Synchronization"].L2_Bound = o["L2_Bound"] o["Memory_Synchronization"].Contested_Accesses = o["Contested_Accesses"] o["Memory_Synchronization"].L3_Bound = o["L3_Bound"] o["Memory_Synchronization"].Machine_Clears = o["Machine_Clears"] o["Memory_Synchronization"].Store_Latency = o["Store_Latency"] o["Memory_Synchronization"].Backend_Bound = o["Backend_Bound"] o["Memory_Synchronization"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Memory_Synchronization"].False_Sharing = o["False_Sharing"] o["Memory_Synchronization"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Synchronization"].Memory_Bound = o["Memory_Bound"] o["Memory_Synchronization"].SQ_Full = o["SQ_Full"] o["Memory_Synchronization"].Store_Bound = o["Store_Bound"] o["Memory_Synchronization"].Bad_Speculation = o["Bad_Speculation"] o["Memory_Synchronization"].DTLB_Store = o["DTLB_Store"] o["Memory_Synchronization"].Branch_Mispredicts = 
o["Branch_Mispredicts"] o["Memory_Synchronization"].Split_Stores = o["Split_Stores"] o["Memory_Synchronization"].Other_Nukes = o["Other_Nukes"] o["Memory_Synchronization"].DRAM_Bound = o["DRAM_Bound"] o["Compute_Bound_Est"].Serializing_Operation = o["Serializing_Operation"] o["Compute_Bound_Est"].Ports_Utilization = o["Ports_Utilization"] o["Compute_Bound_Est"].Retiring = o["Retiring"] o["Compute_Bound_Est"].Frontend_Bound = o["Frontend_Bound"] o["Compute_Bound_Est"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Compute_Bound_Est"].Memory_Bound = o["Memory_Bound"] o["Compute_Bound_Est"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Compute_Bound_Est"].Core_Bound = o["Core_Bound"] o["Compute_Bound_Est"].Backend_Bound = o["Backend_Bound"] o["Compute_Bound_Est"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Compute_Bound_Est"].Divider = o["Divider"] o["Compute_Bound_Est"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Heavy_Operations = o["Heavy_Operations"] o["Irregular_Overhead"].Ports_Utilization = o["Ports_Utilization"] o["Irregular_Overhead"].Retiring = o["Retiring"] o["Irregular_Overhead"].ICache_Misses = o["ICache_Misses"] o["Irregular_Overhead"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Irregular_Overhead"].Frontend_Bound = o["Frontend_Bound"] o["Irregular_Overhead"].Serializing_Operation = o["Serializing_Operation"] o["Irregular_Overhead"].Core_Bound = o["Core_Bound"] o["Irregular_Overhead"].Bad_Speculation = o["Bad_Speculation"] o["Irregular_Overhead"].ITLB_Misses = o["ITLB_Misses"] o["Irregular_Overhead"].Divider = o["Divider"] o["Irregular_Overhead"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Irregular_Overhead"].Memory_Bound = o["Memory_Bound"] o["Irregular_Overhead"].Machine_Clears = o["Machine_Clears"] o["Irregular_Overhead"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Irregular_Overhead"].LCP = o["LCP"] o["Irregular_Overhead"].Other_Mispredicts = o["Other_Mispredicts"] o["Irregular_Overhead"].Few_Uops_Instructions = 
o["Few_Uops_Instructions"] o["Irregular_Overhead"].DSB_Switches = o["DSB_Switches"] o["Irregular_Overhead"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Assists = o["Assists"] o["Irregular_Overhead"].Backend_Bound = o["Backend_Bound"] o["Irregular_Overhead"].Branch_Resteers = o["Branch_Resteers"] o["Irregular_Overhead"].Clears_Resteers = o["Clears_Resteers"] o["Irregular_Overhead"].MS_Switches = o["MS_Switches"] o["Irregular_Overhead"].Other_Nukes = o["Other_Nukes"] o["Irregular_Overhead"].Unknown_Branches = o["Unknown_Branches"] o["Irregular_Overhead"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].Retiring = o["Retiring"] o["Other_Bottlenecks"].Data_Sharing = o["Data_Sharing"] o["Other_Bottlenecks"].L2_Bound = o["L2_Bound"] o["Other_Bottlenecks"].Contested_Accesses = o["Contested_Accesses"] o["Other_Bottlenecks"].L3_Bound = o["L3_Bound"] o["Other_Bottlenecks"].Machine_Clears = o["Machine_Clears"] o["Other_Bottlenecks"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Other_Bottlenecks"].Store_Latency = o["Store_Latency"] o["Other_Bottlenecks"].Other_Mispredicts = o["Other_Mispredicts"] o["Other_Bottlenecks"].DSB_Switches = o["DSB_Switches"] o["Other_Bottlenecks"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Other_Bottlenecks"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Other_Bottlenecks"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Other_Bottlenecks"].DTLB_Load = o["DTLB_Load"] o["Other_Bottlenecks"].ICache_Misses = o["ICache_Misses"] o["Other_Bottlenecks"].Memory_Bound = o["Memory_Bound"] o["Other_Bottlenecks"].SQ_Full = o["SQ_Full"] o["Other_Bottlenecks"].Store_Bound = o["Store_Bound"] o["Other_Bottlenecks"].Bad_Speculation = o["Bad_Speculation"] o["Other_Bottlenecks"].FB_Full = o["FB_Full"] o["Other_Bottlenecks"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Other_Bottlenecks"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Other_Bottlenecks"].Split_Stores = o["Split_Stores"] o["Other_Bottlenecks"].Few_Uops_Instructions = 
o["Few_Uops_Instructions"] o["Other_Bottlenecks"].Other_Nukes = o["Other_Nukes"] o["Other_Bottlenecks"].Unknown_Branches = o["Unknown_Branches"] o["Other_Bottlenecks"].DRAM_Bound = o["DRAM_Bound"] o["Other_Bottlenecks"].L1_Bound = o["L1_Bound"] o["Other_Bottlenecks"].G4K_Aliasing = o["G4K_Aliasing"] o["Other_Bottlenecks"].PMM_Bound = o["PMM_Bound"] o["Other_Bottlenecks"].Core_Bound = o["Core_Bound"] o["Other_Bottlenecks"].Divider = o["Divider"] o["Other_Bottlenecks"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Other_Bottlenecks"].Assists = o["Assists"] o["Other_Bottlenecks"].Backend_Bound = o["Backend_Bound"] o["Other_Bottlenecks"].Branch_Resteers = o["Branch_Resteers"] o["Other_Bottlenecks"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Other_Bottlenecks"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Other_Bottlenecks"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].Ports_Utilization = o["Ports_Utilization"] o["Other_Bottlenecks"].False_Sharing = o["False_Sharing"] o["Other_Bottlenecks"].Heavy_Operations = o["Heavy_Operations"] o["Other_Bottlenecks"].Frontend_Bound = o["Frontend_Bound"] o["Other_Bottlenecks"].Serializing_Operation = o["Serializing_Operation"] o["Other_Bottlenecks"].MEM_Latency = o["MEM_Latency"] o["Other_Bottlenecks"].Split_Loads = o["Split_Loads"] o["Other_Bottlenecks"].ITLB_Misses = o["ITLB_Misses"] o["Other_Bottlenecks"].DTLB_Store = o["DTLB_Store"] o["Other_Bottlenecks"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Bottlenecks"].LCP = o["LCP"] o["Other_Bottlenecks"].Lock_Latency = o["Lock_Latency"] o["Other_Bottlenecks"].Clears_Resteers = o["Clears_Resteers"] o["Other_Bottlenecks"].MS_Switches = o["MS_Switches"] o["Other_Bottlenecks"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Useful_Work"].Retiring = o["Retiring"] o["Useful_Work"].Heavy_Operations = o["Heavy_Operations"] o["Useful_Work"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Useful_Work"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Useful_Work"].Assists 
= o["Assists"] o["Core_Bound_Likely"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Core_Bound_Likely"].Ports_Utilization = o["Ports_Utilization"] o["Core_Bound_Likely"].Retiring = o["Retiring"] o["Core_Bound_Likely"].Frontend_Bound = o["Frontend_Bound"] o["Core_Bound_Likely"].Memory_Bound = o["Memory_Bound"] o["Core_Bound_Likely"].Core_Bound = o["Core_Bound"] o["Core_Bound_Likely"].Backend_Bound = o["Backend_Bound"] o["DSB_Misses"].MITE = o["MITE"] o["DSB_Misses"].LCP = o["LCP"] o["DSB_Misses"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Misses"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Misses"].DSB_Switches = o["DSB_Switches"] o["DSB_Misses"].Branch_Resteers = o["Branch_Resteers"] o["DSB_Misses"].ICache_Misses = o["ICache_Misses"] o["DSB_Misses"].MS_Switches = o["MS_Switches"] o["DSB_Misses"].ITLB_Misses = o["ITLB_Misses"] o["DSB_Misses"].DSB = o["DSB"] o["DSB_Misses"].Unknown_Branches = o["Unknown_Branches"] o["DSB_Misses"].Fetch_Latency = o["Fetch_Latency"] o["DSB_Bandwidth"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Bandwidth"].DSB = o["DSB"] o["DSB_Bandwidth"].MITE = o["MITE"] o["DSB_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].LCP = o["LCP"] o["IC_Misses"].MS_Switches = o["MS_Switches"] o["IC_Misses"].ICache_Misses = o["ICache_Misses"] o["IC_Misses"].ITLB_Misses = o["ITLB_Misses"] o["IC_Misses"].Unknown_Branches = o["Unknown_Branches"] o["IC_Misses"].DSB_Switches = o["DSB_Switches"] o["IC_Misses"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Branch_Misprediction_Cost"].LCP = o["LCP"] o["Branch_Misprediction_Cost"].Other_Mispredicts = o["Other_Mispredicts"] o["Branch_Misprediction_Cost"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Branch_Misprediction_Cost"].DSB_Switches = o["DSB_Switches"] o["Branch_Misprediction_Cost"].Branch_Resteers = 
o["Branch_Resteers"] o["Branch_Misprediction_Cost"].ICache_Misses = o["ICache_Misses"] o["Branch_Misprediction_Cost"].MS_Switches = o["MS_Switches"] o["Branch_Misprediction_Cost"].Bad_Speculation = o["Bad_Speculation"] o["Branch_Misprediction_Cost"].ITLB_Misses = o["ITLB_Misses"] o["Branch_Misprediction_Cost"].Unknown_Branches = o["Unknown_Branches"] o["Branch_Misprediction_Cost"].Fetch_Latency = o["Fetch_Latency"] o["Branch_Misprediction_Cost"].Mispredicts_Resteers = o["Mispredicts_Resteers"] # siblings cross-tree o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],) o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],) o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Serializing_Operation"], o["Mixing_Vectors"], o["Microcode_Sequencer"],) o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],) o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],) o["Decoder0_Alone"].sibling = (o["Few_Uops_Instructions"],) o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],) o["Machine_Clears"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"], o["Microcode_Sequencer"],) o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],) o["DTLB_Load"].sibling = (o["DTLB_Store"],) o["Lock_Latency"].sibling = (o["Store_Latency"],) o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"],) o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"],) o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Remote_Cache"], o["False_Sharing"],) o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],) o["L3_Hit_Latency"].overlap = True 
o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],) o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],) o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],) o["Remote_Cache"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"],) o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],) o["Store_Latency"].overlap = True o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"],) o["Split_Stores"].sibling = (o["Port_4"],) o["DTLB_Store"].sibling = (o["DTLB_Load"],) o["Serializing_Operation"].sibling = (o["MS_Switches"],) o["Mixing_Vectors"].sibling = (o["MS_Switches"],) o["Ports_Utilized_1"].sibling = (o["L1_Bound"],) o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_4"].sibling = (o["Split_Stores"],) o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], 
o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_512b"],) o["FP_Vector_512b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Few_Uops_Instructions"].sibling = (o["Decoder0_Alone"],) o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],) o["Mispredictions"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["Cache_Memory_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],) o["Cache_Memory_Latency"].sibling = (o["L3_Hit_Latency"], o["MEM_Latency"],) o["Memory_Data_TLBs"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Memory_Synchronization"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Irregular_Overhead"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],) o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Misses"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["Branch_Misprediction_Cost"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
249,635
Python
.py
5,708
37.797828
1,906
0.656699
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,917
icx_server_ratios.py
andikleen_pmu-tools/icx_server_ratios.py
# -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel Xeon Scalable Processors 3rd gen (code name Icelake Server)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#

# Helpers

# Error sink; the toplev driver rebinds this to a real printer at runtime.
print_error = lambda msg: False

# Runtime configuration knobs. These defaults are placeholders that the
# driver overwrites before any metric is evaluated.
smt_enabled = False
ebs_mode = False
version = "4.8-full-perf"
base_frequency = -1.0
Memory = 0
Average_Frequency = 0.0
num_cores = 1
num_threads = 1
num_sockets = 1
topdown_use_fixed = False


def handle_error(obj, msg):
    """Record an evaluation error on a tree node: count it, zero the value, clear the threshold."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
    obj.thresh = False


def handle_error_metric(obj, msg):
    """Record an evaluation error on a standalone metric (metrics carry no threshold flag)."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0


# Constants
Exe_Ports = 10
Mem_L2_Store_Cost = 10
Mem_STLB_Hit_Cost = 7
BAClear_Cost = 10
MS_Switches_Cost = 3
Avg_Assist_Cost = 34
Pipeline_Width = 5
OneMillion = 1000000
OneBillion = 1000000000
Energy_Unit = 61
# NOTE(review): Memory is deliberately re-assigned here (it was initialized to
# 0 above with the other knobs) — kept exactly as generated.
Memory = 1
PMM_App_Direct = 1 if Memory == 1 else 0
PERF_METRICS_MSR = 1
DS = 1

# Aux.
# Aux. formulas


def Backend_Bound_Cycles(self, EV, level):
    """Cycles the backend was stalled or under-utilized (denominator for backend breakdowns)."""
    return EV("CYCLE_ACTIVITY.STALLS_TOTAL", level) + Few_Uops_Executed_Threshold(self, EV, level) + EV("EXE_ACTIVITY.BOUND_ON_STORES", level)


def Br_DoI_Jumps(self, EV, level):
    """Retired taken branches that are neither taken-conditionals nor call/return pairs."""
    return EV("BR_INST_RETIRED.NEAR_TAKEN", level) - EV("BR_INST_RETIRED.COND_TAKEN", level) - 2 * EV("BR_INST_RETIRED.NEAR_CALL", level)


def Branching_Retired(self, EV, level):
    """Fraction of issue slots retired by control-flow related instructions (branches, calls, NOPs)."""
    return (EV("BR_INST_RETIRED.ALL_BRANCHES", level) + 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("INST_RETIRED.NOP", level)) / SLOTS(self, EV, level)


def Serialize_Core(self, EV, level):
    """Core-Bound share attributable to serializing operations plus empty-RS bubbles."""
    return self.Core_Bound.compute(EV) * (self.Serializing_Operation.compute(EV) + self.Core_Bound.compute(EV) * EV("RS_EVENTS.EMPTY_CYCLES", level) / CLKS(self, EV, level) * self.Ports_Utilized_0.compute(EV)) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))


def Umisp(self, EV, level):
    """Weight of microcode-related mispredicts relative to all branch mispredicts."""
    return 10 * self.Microcode_Sequencer.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV)


def Assist(self, EV, level):
    """Share of the Microcode Sequencer cost that is caused by assists."""
    return (self.Microcode_Sequencer.compute(EV) / (self.Microcode_Sequencer.compute(EV) + self.Few_Uops_Instructions.compute(EV))) * (self.Assists.compute(EV) / self.Microcode_Sequencer.compute(EV))


def Assist_Frontend(self, EV, level):
    """Frontend (fetch-latency) cost attributable to assists via MS switches and resteers."""
    return Assist(self, EV, level) * self.Fetch_Latency.compute(EV) * (self.MS_Switches.compute(EV) + self.Branch_Resteers.compute(EV) * (self.Clears_Resteers.compute(EV) + self.Mispredicts_Resteers.compute(EV) * Umisp(self, EV, level)) / (self.Clears_Resteers.compute(EV) + self.Unknown_Branches.compute(EV) + self.Mispredicts_Resteers.compute(EV))) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))


def Assist_Retired(self, EV, level):
    """Retiring cost attributable to assists."""
    return Assist(self, EV, level) * self.Heavy_Operations.compute(EV)


def Core_Bound_Cycles(self, EV, level):
    """Cycles charged to the Core-Bound category."""
    return self.Ports_Utilized_0.compute(EV) * CLKS(self, EV, level) + Few_Uops_Executed_Threshold(self, EV, level)


def DurationTimeInSeconds(self, EV, level):
    """Measurement interval in seconds (the "interval-ms" pseudo-event is milliseconds)."""
    return EV("interval-ms", 0) / 1000


def Execute_Cycles(self, EV, level):
    """Cycles with at least one uop executed; per-core count is halved under SMT."""
    return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.THREAD:c1", level)


# factor used for metrics associating fixed costs for FB Hits - according to
# probability theory if all FB Hits come at a random rate in original L1_Miss
# cost interval then the average cost for each one is 0.5 of the fixed cost
def FB_Factor(self, EV, level):
    return 1 + FBHit_per_L1Miss(self, EV, level) / 2


def FBHit_per_L1Miss(self, EV, level):
    """Fill-buffer hits per L1 demand load miss."""
    return EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("MEM_LOAD_RETIRED.L1_MISS", level)


def Fetched_Uops(self, EV, level):
    """Uops delivered by the frontend (issued uops)."""
    return EV("UOPS_ISSUED.ANY", level)


def Few_Uops_Executed_Threshold(self, EV, level):
    """Cycles with few uops executed, weighting 2-port cycles by the retiring fraction."""
    return EV("EXE_ACTIVITY.1_PORTS_UTIL", level) + self.Retiring.compute(EV) * EV("EXE_ACTIVITY.2_PORTS_UTIL", level)


# Floating Point computational (arithmetic) Operations Count
def FLOP_Count(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.SCALAR", level) + 2 * EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + 4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) + 8 * EV("FP_ARITH_INST_RETIRED.8_FLOPS", level) + 16 * EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level)


# Floating Point computational (arithmetic) Operations Count
def FP_Arith_Scalar(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.SCALAR", level)


# Floating Point computational (arithmetic) Operations Count
def FP_Arith_Vector(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.VECTOR", level)


def HighIPC(self, EV, level):
    """IPC normalized by the machine width (1.0 means a fully fed pipeline)."""
    val = IPC(self, EV, level) / Pipeline_Width
    return val


def L2_Bound_Ratio(self, EV, level):
    """Fraction of cycles stalled on L1D misses that were satisfied by L2."""
    return (EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", level) - EV("CYCLE_ACTIVITY.STALLS_L2_MISS", level)) / CLKS(self, EV, level)


def Light_Ops_Sum(self, EV, level):
    """Sum of the explicitly classified light-operation categories."""
    return self.FP_Arith.compute(EV) + self.Memory_Operations.compute(EV) + self.Branch_Instructions.compute(EV)


def LOAD_L2_HIT(self, EV, level):
    return EV("MEM_LOAD_RETIRED.L2_HIT", level) * (1 + FBHit_per_L1Miss(self, EV, level))


def LOAD_L3_HIT(self, EV, level):
    return EV("MEM_LOAD_RETIRED.L3_HIT", level) * FB_Factor(self, EV, level)


def LOAD_LCL_MEM(self, EV, level):
    return EV("MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", level) * (1 + FBHit_per_L1Miss(self, EV, level))


def LOAD_LCL_PMM(self, EV, level):
    # The bare EV() call keeps the event scheduled even when DS == 0.
    EV("MEM_LOAD_RETIRED.LOCAL_PMM", level)
    return EV("MEM_LOAD_RETIRED.LOCAL_PMM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0


def LOAD_RMT_FWD(self, EV, level):
    # The bare EV() call keeps the event scheduled even when DS == 0.
    EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", level)
    return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0


def LOAD_RMT_HITM(self, EV, level):
    # The bare EV() call keeps the event scheduled even when DS == 0.
    EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", level)
    return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0


def LOAD_RMT_MEM(self, EV, level):
    # The bare EV() call keeps the event scheduled even when DS == 0.
    EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", level)
    return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0


def LOAD_RMT_PMM(self, EV, level):
    # The bare EV() call keeps the event scheduled even when DS == 0.
    EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", level)
    return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0


def LOAD_XSNP_HIT(self, EV, level):
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT", level)


def LOAD_XSNP_HITM(self, EV, level):
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM", level)


def LOAD_XSNP_MISS(self, EV, level):
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", level)


def MEM_Bound_Ratio(self, EV, level):
    """Fraction of cycles stalled on L3 misses plus the residual L2 stall fraction."""
    return EV("CYCLE_ACTIVITY.STALLS_L3_MISS", level) / CLKS(self, EV, level) + L2_Bound_Ratio(self, EV, level) - self.L2_Bound.compute(EV)


def Mem_DDR_Hit_Fraction(self, EV, level):
    """Weighted fraction of memory accesses served by DDR DRAM (vs. persistent memory)."""
    return (19 * LOAD_RMT_MEM(self, EV, level) + 10 * (LOAD_LCL_MEM(self, EV, level) + LOAD_RMT_FWD(self, EV, level) + LOAD_RMT_HITM(self, EV, level))) / ((19 * LOAD_RMT_MEM(self, EV, level) + 10 * (LOAD_LCL_MEM(self, EV, level) + LOAD_RMT_FWD(self, EV, level) + LOAD_RMT_HITM(self, EV, level))) + (25 * LOAD_LCL_PMM(self, EV, level) + 33 * LOAD_RMT_PMM(self, EV, level))) if DS else 1


def Mem_Lock_St_Fraction(self, EV, level):
    """Fraction of stores that are locked loads (atomic RMW)."""
    return EV("MEM_INST_RETIRED.LOCK_LOADS", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)


def Memory_Bound_Fraction(self, EV, level):
    """Share of backend-bound cycles that are memory related."""
    return (EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", level) + EV("EXE_ACTIVITY.BOUND_ON_STORES", level)) / Backend_Bound_Cycles(self, EV, level)


def Mispred_Clears_Fraction(self, EV, level):
    """Fraction of pipeline clears caused by branch mispredicts (vs. machine clears)."""
    return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))


def ORO_Demand_RFO_C1(self, EV, level):
    """Cycles with outstanding demand RFOs, capped at total thread cycles."""
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)), level)


def ORO_DRD_Any_Cycles(self, EV, level):
    """Cycles with any outstanding demand data reads, capped at total thread cycles."""
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)), level)


def ORO_DRD_BW_Cycles(self, EV, level):
    """Cycles with 4+ outstanding data reads (bandwidth mode), capped at thread cycles."""
    return EV(lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level), EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c4", level)), level)


def Store_L2_Hit_Cycles(self, EV, level):
    """Cycle cost of RFOs that hit L2, excluding locked accesses."""
    return EV("L2_RQSTS.RFO_HIT", level) * Mem_L2_Store_Cost * (1 - Mem_Lock_St_Fraction(self, EV, level))


def Mem_XSNP_HitM_Cost(self, EV, level):
    return 48 * Core_Frequency(self, EV, level)


def Mem_XSNP_Hit_Cost(self, EV, level):
    return 47.5 * Core_Frequency(self, EV, level)


def Mem_XSNP_None_Cost(self, EV, level):
    return 23 * Core_Frequency(self, EV, level)


def Mem_Local_DRAM_Cost(self, EV, level):
    return 66.5 * Core_Frequency(self, EV, level)


def Mem_Remote_DRAM_Cost(self, EV, level):
    return 131 * Core_Frequency(self, EV, level)


def Mem_Remote_HitM_Cost(self, EV, level):
    return 120 * Core_Frequency(self, EV, level)


def Mem_Remote_Fwd_Cost(self, EV, level):
    return 120 * Core_Frequency(self, EV, level)


def Mem_L2_Hit_Cost(self, EV, level):
    return 4 * Core_Frequency(self, EV, level)


def PERF_METRICS_SUM(self, EV, level):
    """Sum of the four level-1 PERF_METRICS fractions (sanity denominator)."""
    return (EV("PERF_METRICS.FRONTEND_BOUND", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.BAD_SPECULATION", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.RETIRING", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.BACKEND_BOUND", level) / EV("TOPDOWN.SLOTS", level))


def Retire_Fraction(self, EV, level):
    """Retired uops as a fraction of issued uops."""
    return EV("UOPS_RETIRED.SLOTS", level) / EV("UOPS_ISSUED.ANY", level)


# Retired slots per Logical Processor
def Retired_Slots(self, EV, level):
    return self.Retiring.compute(EV) * SLOTS(self, EV, level)


# Number of logical processors (enabled or online) on the target system
def Num_CPUs(self, EV, level):
    return num_cores * num_sockets * num_threads if num_cores else 160 / (2 - smt_enabled)


# A system parameter for dependent-loads (pointer chasing like access pattern)
# of the workload. An integer fraction in range from 0 (no dependent loads) to
# 100 (all loads are dependent loads)
def Dependent_Loads_Weight(self, EV, level):
    return 20


# Total pipeline cost of Branch Misprediction related bottlenecks
def Mispredictions(self, EV, level):
    val = 100 * (1 - Umisp(self, EV, level)) * (self.Branch_Mispredicts.compute(EV) + self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
    self.thresh = (val > 20)
    return val


# Total pipeline cost of instruction fetch related bottlenecks by large code
# footprint programs (i-side cache; TLB and BTB misses)
def Big_Code(self, EV, level):
    val = 100 * self.Fetch_Latency.compute(EV) * (self.ITLB_Misses.compute(EV) + self.ICache_Misses.compute(EV) + self.Unknown_Branches.compute(EV)) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    self.thresh = (val > 20)
    return val


# Total pipeline cost of instruction fetch bandwidth related bottlenecks (when
# the front-end could not sustain operations delivery to the back-end)
def Instruction_Fetch_BW(self, EV, level):
    val = 100 * (self.Frontend_Bound.compute(EV) - (1 - Umisp(self, EV, level)) * self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) - Assist_Frontend(self, EV, level)) - Big_Code(self, EV, level)
    self.thresh = (val > 20)
    return val


# Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks
def Cache_Memory_Bandwidth(self, EV, level):
    val = 100 * ((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.MEM_Bandwidth.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.SQ_Full.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.FB_Full.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val


# Total pipeline cost of external Memory- or Cache-Latency related bottlenecks
def Cache_Memory_Latency(self, EV, level):
    val = 100 * ((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.MEM_Latency.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.L3_Hit_Latency.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * self.L2_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.Store_Latency.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.L1_Hit_Latency.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val


# Total pipeline cost of Memory Address Translation related bottlenecks
# (data-side TLBs)
def Memory_Data_TLBs(self, EV, level):
    val = 100 * (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / max(self.Memory_Bound.compute(EV), (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV)))) * (self.DTLB_Load.compute(EV) / max(self.L1_Bound.compute(EV), (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.DTLB_Store.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))))
    self.thresh = (val > 20)
    return val


# Total pipeline cost of Memory Synchronization related bottlenecks (data
# transfers and coherency updates across processors)
def Memory_Synchronization(self, EV, level):
    # NOTE(review): Other_Nukes/(Other_Nukes) evaluates to 1, making the last
    # term vanish — an artifact of the metric generator; kept as generated.
    val = 100 * (self.Memory_Bound.compute(EV) * ((self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.Contested_Accesses.compute(EV) + self.Data_Sharing.compute(EV)) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)) + (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * self.False_Sharing.compute(EV) / ((self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)) - self.Store_Latency.compute(EV))) + self.Machine_Clears.compute(EV) * (1 - self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))))
    self.thresh = (val > 10)
    return val


# Total pipeline cost when the execution is compute-bound - an estimation.
# Covers Core Bound when High ILP as well as when long-latency execution units
# are busy.
# Total pipeline cost when the execution is compute-bound - an estimation.
# Covers Core Bound when High ILP as well as when long-latency execution units are busy.
def Compute_Bound_Est(self, EV, level):
    val = 100 *((self.Core_Bound.compute(EV) * self.Divider.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) + (self.Core_Bound.compute(EV) * (self.Ports_Utilization.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) * (self.Ports_Utilized_3m.compute(EV) / (self.Ports_Utilized_0.compute(EV) + self.Ports_Utilized_1.compute(EV) + self.Ports_Utilized_2.compute(EV) + self.Ports_Utilized_3m.compute(EV)))))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work
# imbalance multithreaded workloads, overhead in system services or virtualized environments)
def Irregular_Overhead(self, EV, level):
    val = 100 *(Assist_Frontend(self, EV, level) + Umisp(self, EV, level) * self.Branch_Mispredicts.compute(EV) + (self.Machine_Clears.compute(EV) * self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))) + Serialize_Core(self, EV, level) + Assist_Retired(self, EV, level))
    self.thresh = (val > 10)
    return val

# Total pipeline cost of remaining bottlenecks in the back-end. Examples include
# data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.
def Other_Bottlenecks(self, EV, level):
    val = 100 -(Big_Code(self, EV, level) + Instruction_Fetch_BW(self, EV, level) + Mispredictions(self, EV, level) + Cache_Memory_Bandwidth(self, EV, level) + Cache_Memory_Latency(self, EV, level) + Memory_Data_TLBs(self, EV, level) + Memory_Synchronization(self, EV, level) + Compute_Bound_Est(self, EV, level) + Irregular_Overhead(self, EV, level) + Branching_Overhead(self, EV, level) + Useful_Work(self, EV, level))
    self.thresh = (val > 20)
    return val

# Total pipeline cost of instructions used for program control-flow - a subset of the
# Retiring category in TMA. Examples include function calls; loops and alignments.
# (A lower bound). Consider Loop Unrolling or function inlining optimizations
def Branching_Overhead(self, EV, level):
    val = 100 * Branching_Retired(self, EV, level)
    self.thresh = (val > 5)
    return val

# Total pipeline cost of "useful operations" - the portion of Retiring category
# not covered by Branching_Overhead nor Irregular_Overhead.
def Useful_Work(self, EV, level):
    val = 100 *(self.Retiring.compute(EV) - Branching_Retired(self, EV, level) - Assist_Retired(self, EV, level))
    self.thresh = (val > 20)
    return val

# Probability of Core Bound bottleneck hidden by SMT-profiling artifacts.
# Tip: consider analysis with SMT disabled
def Core_Bound_Likely(self, EV, level):
    val = 100 *(1 - self.Core_Bound.compute(EV) / self.Ports_Utilization.compute(EV) if self.Core_Bound.compute(EV) < self.Ports_Utilization.compute(EV) else 1) if SMT_2T_Utilization(self, EV, level) > 0.5 else 0
    self.thresh = (val > 0.5)
    return val

# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)

# Uops Per Instruction
def UopPI(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
    self.thresh = (val > 1.05)
    return val

# Uops per taken branch
def UpTB(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 1.5
    return val

# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
    return 1 / IPC(self, EV, level)
# Per-Logical Processor actual clocks when the Logical Processor is active.
def CLKS(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD", level)

# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
    # NOTE(review): both branches of the generated conditional are identical;
    # kept verbatim to preserve the auto-generated structure.
    return EV("TOPDOWN.SLOTS", level) if topdown_use_fixed else EV("TOPDOWN.SLOTS", level)

# Fraction of Physical Core issue-slots utilized by this Logical Processor
def Slots_Utilization(self, EV, level):
    return SLOTS(self, EV, level) / (EV("TOPDOWN.SLOTS:percore", level) / 2) if smt_enabled else 1

# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions.
# Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_ISSUED.ANY", level)

# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)

# Floating Point Operations Per Cycle
def FLOPc(self, EV, level):
    return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level)

# Actual per-core usage of the Floating Point non-X87 execution units (regardless of
# precision or vector-width). Values > 1 are possible due to Fused-Multiply Add FMA counting - common.
def FP_Arith_Utilization(self, EV, level):
    return (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level)) / (2 * CORE_CLKS(self, EV, level))

# Instruction-Level-Parallelism (average number of uops executed when there is
# execution) per thread (logical-processor)
def ILP(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_EXECUTED.THREAD:c1", level)

# uops Executed per Cycle
def EPC(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / CLKS(self, EV, level)

# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
    return EV("CPU_CLK_UNHALTED.DISTRIBUTED", level) if smt_enabled else CLKS(self, EV, level)

# Instructions per Load (lower number means higher occurrence rate).
# Tip: reduce memory accesses.
#Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_LOADS", level)
    self.thresh = (val < 3)
    return val

# Instructions per Store (lower number means higher occurrence rate).
# Tip: reduce memory accesses.
#Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)
    self.thresh = (val < 8)
    return val

# Instructions per Branch (lower number means higher occurrence rate)
def IpBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 8)
    return val

# Instructions per (near) call (lower number means higher occurrence rate)
def IpCall(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
    self.thresh = (val < 200)
    return val

# Instructions per taken branch
def IpTB(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 2 + 1
    return val

# Branch instructions per taken branch.
# Can be used to approximate PGO-likelihood for non-loopy codes.
def BpTkBranch(self, EV, level):
    return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)

# Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate).
# Reference: Tuning Performance via Metrics with Expectations.
# https://doi.org/10.1109/LCA.2019.2916408
def IpFLOP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / FLOP_Count(self, EV, level)
    self.thresh = (val < 10)
    return val
def IpArith(self, EV, level): val = EV("INST_RETIRED.ANY", level) / (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level)) self.thresh = (val < 10) return val # Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. def IpArith_Scalar_SP(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level) self.thresh = (val < 10) return val # Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. def IpArith_Scalar_DP(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level) self.thresh = (val < 10) return val # Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. def IpArith_AVX128(self, EV, level): val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level)) self.thresh = (val < 10) return val # Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. def IpArith_AVX256(self, EV, level): val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)) self.thresh = (val < 10) return val # Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. 
# Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher
# occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX512(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val

# Instructions per PAUSE (lower number means higher occurrence rate)
def IpPause(self, EV, level):
    return Instructions(self, EV, level) / EV("MISC_RETIRED.PAUSE_INST", level)

# Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch)
# (lower number means higher occurrence rate)
def IpSWPF(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("SW_PREFETCH_ACCESS.T0:u0xF", level)
    self.thresh = (val < 100)
    return val

# Total number of retired Instructions
def Instructions(self, EV, level):
    return EV("INST_RETIRED.ANY", level)

# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
    return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.SLOTS:c1", level)

# Instructions per a microcode Assist invocation.
# See Assists tree node for details (lower number means higher occurrence rate)
def IpAssist(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("ASSISTS.ANY", level)
    self.thresh = (val < 100000)
    return val

def Execute(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level)

# Average number of uops fetched from LSD per cycle
def Fetch_LSD(self, EV, level):
    return EV("LSD.UOPS", level) / EV("LSD.CYCLES_ACTIVE", level)

# Average number of uops fetched from DSB per cycle
def Fetch_DSB(self, EV, level):
    return EV("IDQ.DSB_UOPS", level) / EV("IDQ.DSB_CYCLES_ANY", level)

# Average number of uops fetched from MITE per cycle
def Fetch_MITE(self, EV, level):
    return EV("IDQ.MITE_UOPS", level) / EV("IDQ.MITE_CYCLES_ANY", level)

# Average number of Uops issued by front-end when it issued something
def Fetch_UpC(self, EV, level):
    return EV("UOPS_ISSUED.ANY", level) / EV("UOPS_ISSUED.ANY:c1", level)

# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache).
# See section 'Decoded ICache' in Optimization Manual.
# http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val

# Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit -
# see DSB_Switches tree node for details.
def DSB_Switch_Cost(self, EV, level):
    return EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", level) / EV("DSB2MITE_SWITCHES.PENALTY_CYCLES:c1:e1", level)
def DSB_Misses(self, EV, level): val = 100 *(self.Fetch_Latency.compute(EV) * self.DSB_Switches.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) + self.Fetch_Bandwidth.compute(EV) * self.MITE.compute(EV) / (self.MITE.compute(EV) + self.DSB.compute(EV))) self.thresh = (val > 10) return val # Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. def DSB_Bandwidth(self, EV, level): val = 100 *(self.Frontend_Bound.compute(EV) * (self.Fetch_Bandwidth.compute(EV) / (self.Fetch_Bandwidth.compute(EV) + self.Fetch_Latency.compute(EV))) * (self.DSB.compute(EV) / (self.MITE.compute(EV) + self.DSB.compute(EV)))) self.thresh = (val > 10) return val # Average Latency for L1 instruction cache misses def ICache_Miss_Latency(self, EV, level): return EV("ICACHE_16B.IFDATA_STALL", level) / EV("ICACHE_16B.IFDATA_STALL:c1:e1", level) # Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. 
def IC_Misses(self, EV, level): val = 100 *(self.Fetch_Latency.compute(EV) * self.ICache_Misses.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))) self.thresh = (val > 5) return val # Instructions per non-speculative DSB miss (lower number means higher occurrence rate) def IpDSB_Miss_Ret(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("FRONTEND_RETIRED.ANY_DSB_MISS", level) self.thresh = (val < 50) return val # Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate) def IpUnknown_Branch(self, EV, level): return Instructions(self, EV, level) / EV("BACLEARS.ANY", level) # L2 cache true code cacheline misses per kilo instruction def L2MPKI_Code(self, EV, level): return 1000 * EV("FRONTEND_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level) # L2 cache speculative code cacheline misses per kilo instruction def L2MPKI_Code_All(self, EV, level): return 1000 * EV("L2_RQSTS.CODE_RD_MISS", level) / EV("INST_RETIRED.ANY", level) # Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate) def IpMispredict(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) self.thresh = (val < 200) return val # Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate). def IpMisp_Cond_Ntaken(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_NTAKEN", level) self.thresh = (val < 200) return val # Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate). 
# Instructions per retired Mispredicts for conditional taken branches
# (lower number means higher occurrence rate).
def IpMisp_Cond_Taken(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_TAKEN", level)
    self.thresh = (val < 200)
    return val

# Instructions per retired Mispredicts for return branches
# (lower number means higher occurrence rate).
def IpMisp_Ret(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.RET", level)
    self.thresh = (val < 500)
    return val

# Instructions per retired Mispredicts for indirect CALL or JMP branches
# (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.INDIRECT", level)
    self.thresh = (val < 1000)
    return val

# Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative
# branch misprediction (retired JEClear)
def Branch_Misprediction_Cost(self, EV, level):
    return Mispredictions(self, EV, level) * SLOTS(self, EV, level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / 100

# Speculative to Retired ratio of all clears (covering Mispredicts and nukes)
def Spec_Clears_Ratio(self, EV, level):
    return EV("INT_MISC.CLEARS_COUNT", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))

# Fraction of branches that are non-taken conditionals
def Cond_NT(self, EV, level):
    return EV("BR_INST_RETIRED.COND_NTAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are taken conditionals
def Cond_TK(self, EV, level):
    return EV("BR_INST_RETIRED.COND_TAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are CALL or RET
def CallRet(self, EV, level):
    return (EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("BR_INST_RETIRED.NEAR_RETURN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches that are unconditional (direct or indirect) jumps
def Jump(self, EV, level):
    return Br_DoI_Jumps(self, EV, level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)

# Fraction of branches of other types (not individually covered by other
# metrics in Info.Branches group)
def Other_Branches(self, EV, level):
    return 1 -(Cond_NT(self, EV, level) + Cond_TK(self, EV, level) + CallRet(self, EV, level) + Jump(self, EV, level))

# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / (EV("MEM_LOAD_RETIRED.L1_MISS", level) + EV("MEM_LOAD_RETIRED.FB_HIT", level))

# Memory-Level-Parallelism (average number of L1 miss demand load when there is
# at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)

# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)

# L1 cache true misses per kilo instruction for all demand loads (including speculative)
def L1MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.ALL_DEMAND_DATA_RD", level) / EV("INST_RETIRED.ANY", level)

# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)

# L2 cache misses per kilo instruction for all request types (including speculative)
def L2MPKI_All(self, EV, level):
    return 1000 *((EV("OFFCORE_REQUESTS.ALL_DATA_RD", level) - EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)) + EV("L2_RQSTS.ALL_DEMAND_MISS", level) + EV("L2_RQSTS.SWPF_MISS", level)) / Instructions(self, EV, level)

# L2 cache misses per kilo instruction for all demand loads (including speculative)
def L2MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level) / EV("INST_RETIRED.ANY", level)

# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("L2_RQSTS.RFO_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache hits per kilo instruction for all demand loads (including speculative)
def L2HPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level) / EV("INST_RETIRED.ANY", level)

# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)

# Fill Buffer (FB) hits per kilo instructions for retired demand loads
# (L1D misses that merge into ongoing miss-handling entries)
def FB_HPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("INST_RETIRED.ANY", level)

def L1D_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)

def L2_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)

def L3_Cache_Fill_BW(self, EV, level):
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)

def L3_Cache_Access_BW(self, EV, level):
    return 64 * EV("OFFCORE_REQUESTS.ALL_REQUESTS", level) / OneBillion / Time(self, EV, level)

# Utilization of the core's Page Walker(s) serving STLB misses triggered by
# instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    val = (EV("ITLB_MISSES.WALK_PENDING", level) + EV("DTLB_LOAD_MISSES.WALK_PENDING", level) + EV("DTLB_STORE_MISSES.WALK_PENDING", level)) / (2 * CORE_CLKS(self, EV, level))
    self.thresh = (val > 0.5)
    return val

# STLB (2nd level TLB) code speculative misses per kilo instruction
# (misses of any page-size that complete the page walk)
def Code_STLB_MPKI(self, EV, level):
    return 1000 * EV("ITLB_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# STLB (2nd level TLB) data load speculative misses per kilo instruction
# (misses of any page-size that complete the page walk)
def Load_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# STLB (2nd level TLB) data store speculative misses per kilo instruction
# (misses of any page-size that complete the page walk)
def Store_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)

# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    return L1D_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    return L2_Cache_Fill_BW(self, EV, level)

# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    return L3_Cache_Fill_BW(self, EV, level)

# Average per-core data access bandwidth to the L3 cache [GB / sec]
def L3_Cache_Access_BW_2T(self, EV, level):
    return L3_Cache_Access_BW(self, EV, level)

# Rate of silent evictions from the L2 cache per Kilo instruction where the
# evicted lines are dropped (no writeback to L3 or memory)
def L2_Evictions_Silent_PKI(self, EV, level):
    return 1000 * EV("L2_LINES_OUT.SILENT", level) / Instructions(self, EV, level)

# Rate of non silent evictions from the L2 cache per Kilo instruction
def L2_Evictions_NonSilent_PKI(self, EV, level):
    return 1000 * EV("L2_LINES_OUT.NON_SILENT", level) / Instructions(self, EV, level)

# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)

# Average Latency for L3 cache miss demand Loads
def Load_L3_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD:u0x10", level) / EV("OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", level)

# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD:c1", level)

# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)

# Un-cacheable retired load per kilo instruction
def UC_Load_PKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_MISC_RETIRED.UC", level) / EV("INST_RETIRED.ANY", level)

# "Bus lock" per kilo instruction
def Bus_Lock_PKI(self, EV, level):
    return 1000 * EV("SQ_MISC.BUS_LOCK", level) / EV("INST_RETIRED.ANY", level)

# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)

# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)

# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)

# Measured Average Uncore Frequency for the SoC [GHz]
def Uncore_Frequency(self, EV, level):
    return Socket_CLKS(self, EV, level) / 1e9 / Time(self, EV, level)

# Giga Floating Point Operations Per Second. Aggregate across all supported options of:
# FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)

# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)

# Fraction of Core cycles where the core was running with power-delivery for baseline
# license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current
# AVX 256-bit codes.
def Power_License0_Utilization(self, EV, level):
    return EV("CORE_POWER.LVL0_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level)
# Fraction of Core cycles where the core was running with power-delivery for license
# level 1. This includes high current AVX 256-bit instructions as well as low current
# AVX 512-bit instructions.
def Power_License1_Utilization(self, EV, level):
    val = EV("CORE_POWER.LVL1_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level)
    self.thresh = (val > 0.5)
    return val

# Fraction of Core cycles where the core was running with power-delivery for license
# level 2 (introduced in SKX). This includes high current AVX 512-bit instructions.
def Power_License2_Utilization(self, EV, level):
    val = EV("CORE_POWER.LVL2_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level)
    self.thresh = (val > 0.5)
    return val

# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_DISTRIBUTED", level) if smt_enabled else 0

# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val

# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)

# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    return (64 *(EV("UNC_M_CAS_COUNT.RD", level) + EV("UNC_M_CAS_COUNT.WR", level)) / OneBillion) / Time(self, EV, level)

# Average latency of data read request to external memory (in nanoseconds).
# Accounts for demand loads and L1/L2 prefetches.
def MEM_Read_Latency(self, EV, level):
    return OneBillion *(EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD", level) / EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD", level)) / (Socket_CLKS(self, EV, level) / Time(self, EV, level))

# Average number of parallel data read requests to external memory.
# Accounts for demand loads and L1/L2 prefetches
def MEM_Parallel_Reads(self, EV, level):
    return EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD", level) / EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD:c1", level)

# Average latency of data read request to external 3D X-Point memory [in nanoseconds].
# Accounts for demand loads and L1/L2 data-read prefetches
def MEM_PMM_Read_Latency(self, EV, level):
    EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM", level)
    EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM", level)
    return (OneBillion *(EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM", level) / EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM", level)) / EV("UNC_CHA_CLOCKTICKS:one_unit", level)) if PMM_App_Direct else 0

# Average latency of data read request to external DRAM memory [in nanoseconds].
# Accounts for demand loads and L1/L2 data-read prefetches
def MEM_DRAM_Read_Latency(self, EV, level):
    return OneBillion *(EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR", level) / EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR", level)) / EV("UNC_CHA_CLOCKTICKS:one_unit", level)

# Average 3DXP Memory Bandwidth Use for reads [GB / sec]
def PMM_Read_BW(self, EV, level):
    return ((64 * EV("UNC_M_PMM_RPQ_INSERTS", level) / OneBillion) / Time(self, EV, level)) if PMM_App_Direct else 0

# Average 3DXP Memory Bandwidth Use for Writes [GB / sec]
def PMM_Write_BW(self, EV, level):
    return ((64 * EV("UNC_M_PMM_WPQ_INSERTS", level) / OneBillion) / Time(self, EV, level)) if PMM_App_Direct else 0

# Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads
# that are initiated by end device controllers that are requesting memory from the CPU
def IO_Read_BW(self, EV, level):
    return EV("UNC_CHA_TOR_INSERTS.IO_PCIRDCUR", level) * 64 / OneBillion / Time(self, EV, level)
Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU def IO_Write_BW(self, EV, level): return (EV("UNC_CHA_TOR_INSERTS.IO_HIT_ITOM", level) + EV("UNC_CHA_TOR_INSERTS.IO_MISS_ITOM", level) + EV("UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR", level) + EV("UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR", level)) * 64 / OneBillion / Time(self, EV, level) # Run duration time in seconds def Time(self, EV, level): val = EV("interval-s", 0) self.thresh = (val < 1) return val # Socket actual clocks when any core is active on that socket def Socket_CLKS(self, EV, level): return EV("UNC_CHA_CLOCKTICKS:one_unit", level) # Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate] def IpFarBranch(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level) self.thresh = (val < 1000000) return val # Event groups class Frontend_Bound: name = "Frontend_Bound" domain = "Slots" area = "FE" level = 1 htoff = False sample = ['FRONTEND_RETIRED.LATENCY_GE_4:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO']) maxval = None def compute(self, EV): try: self.val = (EV("PERF_METRICS.FRONTEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) - EV("INT_MISC.UOP_DROPPING", 1) / SLOTS(self, EV, 1) if topdown_use_fixed else(EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) - EV("INT_MISC.UOP_DROPPING", 1)) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Frontend_Bound zero division") return self.val desc = """ This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. 
Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.""" class Fetch_Latency: name = "Fetch_Latency" domain = "Slots" area = "FE" level = 2 htoff = False sample = ['FRONTEND_RETIRED.LATENCY_GE_16:pp', 'FRONTEND_RETIRED.LATENCY_GE_8:pp'] errcount = 0 sibling = None metricgroup = frozenset(['Frontend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = (Pipeline_Width * EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", 2) - EV("INT_MISC.UOP_DROPPING", 2)) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Fetch_Latency zero division") return self.val desc = """ This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction- cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. 
In such cases; the Frontend eventually delivers no uops for some period.""" class ICache_Misses: name = "ICache_Misses" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.L2_MISS:pp', 'FRONTEND_RETIRED.L1I_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss']) maxval = None def compute(self, EV): try: self.val = EV("ICACHE_DATA.STALLS", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "ICache_Misses zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.. Using compiler's Profile-Guided Optimization (PGO) can reduce i-cache misses through improved hot code layout.""" class ITLB_Misses: name = "ITLB_Misses" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['FRONTEND_RETIRED.STLB_MISS:pp', 'FRONTEND_RETIRED.ITLB_MISS:pp'] errcount = 0 sibling = None metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB']) maxval = None def compute(self, EV): try: self.val = EV("ICACHE_TAG.STALLS", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "ITLB_Misses zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.. Consider large 2M pages for code (selectively prefer hot large-size function, due to limited 2M entries). Linux options: standard binaries use libhugetlbfs; Hfsort.. https://github. 
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public ations/optimizing-function-placement-for-large-scale-data- center-applications-2/"""

# Level-3 node under Fetch_Latency: fetch delay after redirecting to a
# corrected path. Adds the Unknown_Branches estimate on top of resteer cycles
# (siblings are wired up by the framework elsewhere in this file).
class Branch_Resteers:
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("INT_MISC.CLEAR_RESTEER_CYCLES", 3) / CLKS(self, EV, 3) + self.Unknown_Branches.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings."""

# Level-4 split of Branch_Resteers: the misprediction-caused share, using the
# Mispred_Clears_Fraction helper to apportion resteer cycles.
class Mispredicts_Resteers:
    name = "Mispredicts_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 4) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Mispredicts_Resteers zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage."""

# Level-4 split of Branch_Resteers: the machine-clear-caused share
# (complement of Mispredicts_Resteers).
class Clears_Resteers:
    name = "Clears_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'MachineClears'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (1 - Mispred_Clears_Fraction(self, EV, 4)) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Clears_Resteers zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears."""

# Level-4 node: cost of BACLEARS (frontend-detected new branches), estimated
# with the fixed BAClear_Cost constant from the file header.
class Unknown_Branches:
    name = "Unknown_Branches"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['BACLEARS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = BAClear_Cost * EV("BACLEARS.ANY", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Unknown_Branches zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches"""

# Level-3 estimated node: penalty of switching uop delivery to the Microcode
# Sequencer, costed with the MS_Switches_Cost constant. maxval caps at 1.0.
class MS_Switches:
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """ This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines.
Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals."""

# Level-3 node: cycles stalled on Length Changing Prefixes during decode.
class LCP:
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DECODE.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this."""

# Level-3 node: penalty cycles of DSB-to-MITE pipeline switches.
class DSB_Switches:
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.DSB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in Optimization Manual:. http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html"""

# Level-2 node: bandwidth component of Frontend Bound, derived as the
# remainder of Frontend_Bound after Fetch_Latency (clamped at 0).
# Note: thresholded independently of the parent, unlike most level-2 nodes.
class Fetch_Bandwidth:
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_2:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV))
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """ This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend."""

# Level-3 estimated node: cycles limited by the legacy MITE decode pipeline
# (active-but-not-optimal MITE cycles, halved per-core).
class MITE:
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.ANY_DSB_MISS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.MITE_CYCLES_ANY", 3) - EV("IDQ.MITE_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck..
Consider tuning codegen of 'small hotspots' that can fit in DSB. Read about 'Decoded ICache' in Optimization Manual:. http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html"""

# Level-4 node: cycles where only decoder 0 decoded (cmask 1 minus cmask 2).
class Decoder0_Alone:
    name = "Decoder0_Alone"
    domain = "Slots_Estimated"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("INST_DECODED.DECODERS:c1", 4) - EV("INST_DECODED.DECODERS:c2", 4)) / CORE_CLKS(self, EV, 4) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Decoder0_Alone zero division")
        return self.val
    desc = """ This metric represents fraction of cycles where decoder-0 was the only active decoder"""

# Level-4 node: cycles where MITE delivered exactly 4 uops (cmask 4 minus 5).
class MITE_4wide:
    name = "MITE_4wide"
    domain = "Core_Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.MITE_UOPS:c4", 4) - EV("IDQ.MITE_UOPS:c5", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE_4wide zero division")
        return self.val
    desc = """ This metric represents fraction of cycles where (only) 4 uops were delivered by the MITE pipeline"""

# Level-3 estimated node: cycles limited by the DSB (decoded uop cache) fetch
# pipeline, mirroring the MITE formula above.
class DSB:
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.DSB_CYCLES_ANY", 3) - EV("IDQ.DSB_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline.
For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here."""

# Level-1 TMA category: slots wasted on wrong-path work, computed as the
# remainder after the other three level-1 categories (clamped at 0).
class Bad_Speculation:
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(1 -(self.Frontend_Bound.compute(EV) + self.Backend_Bound.compute(EV) + self.Retiring.compute(EV)) , 0 )
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """ This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss- predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."""

# Level-2 node: misprediction share of Bad_Speculation, apportioned by the
# Mispred_Clears_Fraction helper.
class Branch_Mispredicts:
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """ This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.. Using profile feedback in the compiler may help.
Please see the Optimization Manual for general strategies for addressing branch misprediction issues.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Other_Mispredicts: name = "Other_Mispredicts" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'BrMispredicts']) maxval = None def compute(self, EV): try: self.val = max(self.Branch_Mispredicts.compute(EV) * (1 - EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) / (EV("INT_MISC.CLEARS_COUNT", 3) - EV("MACHINE_CLEARS.COUNT", 3))) , 0.0001 ) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Other_Mispredicts zero division") return self.val desc = """ This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).""" class Machine_Clears: name = "Machine_Clears" domain = "Slots" area = "BAD" level = 2 htoff = False sample = ['MACHINE_CLEARS.COUNT'] errcount = 0 sibling = None metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = max(0 , self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Machine_Clears zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.. See \"Memory Disambiguation\" in Optimization Manual and:. 
https://software.intel.com/sites/default/files/ m/d/4/1/d/8/sma.pdf""" class Other_Nukes: name = "Other_Nukes" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvIO', 'Machine_Clears']) maxval = None def compute(self, EV): try: self.val = max(self.Machine_Clears.compute(EV) * (1 - EV("MACHINE_CLEARS.MEMORY_ORDERING", 3) / EV("MACHINE_CLEARS.COUNT", 3)) , 0.0001 ) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Other_Nukes zero division") return self.val desc = """ This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.""" class Backend_Bound: name = "Backend_Bound" domain = "Slots" area = "BE" level = 1 htoff = False sample = ['TOPDOWN.BACKEND_BOUND_SLOTS'] errcount = 0 sibling = None metricgroup = frozenset(['BvOB', 'TmaL1']) maxval = None def compute(self, EV): try: self.val = (EV("PERF_METRICS.BACKEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) + (Pipeline_Width * EV("INT_MISC.CLEARS_COUNT", 1)) / SLOTS(self, EV, 1) if topdown_use_fixed else(EV("TOPDOWN.BACKEND_BOUND_SLOTS", 1) + Pipeline_Width * EV("INT_MISC.CLEARS_COUNT", 1)) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.2) except ZeroDivisionError: handle_error(self, "Backend_Bound zero division") return self.val desc = """ This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. 
Backend Bound is further divided into two main categories: Memory Bound and Core Bound."""

# Level-2 node: memory share of Backend_Bound, apportioned by the
# Memory_Bound_Fraction helper.
class Memory_Bound:
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """ This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)."""

# Level-3 node: memory stalls that are NOT L1D misses (total memory stalls
# minus L1D-miss stalls, clamped at 0).
class L1_Bound:
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT:pp', 'MEM_LOAD_RETIRED.FB_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max((EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3) , 0 )
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """ This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB.
These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache."""

# Level-4 node: estimated DTLB-miss cycles for loads; the STLB-hit cost model
# is capped by the actual L1-only memory stall cycles.
class DTLB_Load:
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = min(Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT:c1", 4) + EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 4) , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("CYCLE_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """ This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages).
This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss.."""

# Level-5 node: DTLB misses satisfied by the STLB (DTLB_Load minus the
# page-walk portion).
class Load_STLB_Hit:
    name = "Load_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = self.DTLB_Load.compute(EV) - self.Load_STLB_Miss.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Hit zero division")
        return self.val
    desc = """ This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)"""

# Level-5 node: cycles with an active hardware page walk for loads.
class Load_STLB_Miss:
    name = "Load_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Miss zero division")
        return self.val
    desc = """ This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk"""

# Level-4 node: loads blocked on store-forwarding, costed at a fixed 13
# cycles per blocked load.
class Store_Fwd_Blk:
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val
    desc = """ This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores.
To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading."""

# Level-4 node: estimated exposure of L1 hit latency (2 cycles per dependent
# L1-hit load), capped by the measured L1-only memory stall cycles.
class L1_Hit_Latency:
    name = "L1_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = min(2 *(EV("MEM_INST_RETIRED.ALL_LOADS", 4) - EV("MEM_LOAD_RETIRED.FB_HIT", 4) - EV("MEM_LOAD_RETIRED.L1_MISS", 4)) * Dependent_Loads_Weight(self, EV, 4) / 100 , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("CYCLE_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Hit_Latency zero division")
        return self.val
    desc = """ This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache.
The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example."""

# Level-4 node: cycles spent on locked (atomic) accesses; locks always show
# under L1_Bound regardless of where the data came from.
class Lock_Latency:
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.LOCK_LOADS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (16 * max(0 , EV("MEM_INST_RETIRED.LOCK_LOADS", 4) - EV("L2_RQSTS.ALL_RFO", 4)) + Mem_Lock_St_Fraction(self, EV, 4) * (Mem_L2_Store_Cost * EV("L2_RQSTS.RFO_HIT", 4) + ORO_Demand_RFO_C1(self, EV, 4))) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them."""

# Level-4 node: loads split across a cache-line boundary, weighted by the
# observed average load-miss latency.
class Split_Loads:
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. . Consider aligning data or hot structure fields.
See the Optimization Manual for more details"""

# Level-4 node: loads falsely aliased against stores 4K apart. Class name is
# prefixed with 'G' because the metric name starts with a digit.
class G4K_Aliasing:
    name = "4K_Aliasing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "G4K_Aliasing zero division")
        return self.val
    desc = """ This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).. Consider reducing independent loads/stores accesses with 4K offsets. See the Optimization Manual for more details"""

# Level-4 node: L1D fill-buffer full cycles; thresholded independently of the
# parent (values >1 are valid per the description).
class FB_Full:
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("L1D_PEND_MISS.FB_FULL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val
    desc = """ This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory).. See $issueBW and $issueSL hints.
Avoid software prefetches if indeed memory BW limited."""

# Level-3 node: L2-hit share of the L2 bound ratio, weighted by L2 hits vs
# fill-buffer-full periods.
class L2_Bound:
    name = "L2_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L2_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (LOAD_L2_HIT(self, EV, 3) / (LOAD_L2_HIT(self, EV, 3) + EV("L1D_PEND_MISS.FB_FULL_PERIODS", 3))) * L2_Bound_Ratio(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L2_Bound zero division")
        return self.val
    desc = """ This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance."""

# Level-3 node: stalls on L3 accesses (L2-miss stalls minus L3-miss stalls).
class L3_Bound:
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3) - EV("CYCLE_ACTIVITY.STALLS_L3_MISS", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """ This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e.
L2 misses/L3 hits) can improve the latency and increase performance."""

# Level-4 node: cross-core contested accesses (HitM and snoop-miss loads),
# costed relative to an L2 hit and scaled by the fill-buffer factor.
class Contested_Accesses:
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = ((Mem_XSNP_HitM_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HITM(self, EV, 4) + (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_MISS(self, EV, 4)) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing."""

# Level-4 node: clean cross-core data sharing (snoop-hit loads).
class Data_Sharing:
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HIT(self, EV, 4) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses.
Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance."""

# Level-4 node: exposed L3 hit latency (L3-hit cost above an L2 hit).
class L3_Hit_Latency:
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_XSNP_None_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance.
Note the value of this node may overlap with its siblings."""

# Level-4 node: cycles the Super Queue stalled L1D misses (SQ full).
class SQ_Full:
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("L1D_PEND_MISS.L2_STALL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """ This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)."""

# Level-3 node: stalls attributed to external memory, via the
# MEM_Bound_Ratio helper.
class DRAM_Bound:
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = MEM_Bound_Ratio(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """ This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance."""

# Level-4 node: cycles with many outstanding data reads (bandwidth-limited
# DRAM behavior), via the ORO_DRD_BW_Cycles helper.
class MEM_Bandwidth:
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM).
The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that).. Improve data accesses to reduce cacheline transfers from/to memory. Examples: 1) Consume all bytes of a each cacheline before it is evicted (e.g. reorder structure elements and split non-hot ones), 2) merge computed-limited with BW-limited loops, 3) NUMA optimizations in multi-socket system. Note: software prefetches will not help BW-limited application..""" class MEM_Latency: name = "MEM_Latency" domain = "Clocks" area = "BE/Mem" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore']) maxval = None def compute(self, EV): try: self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "MEM_Latency zero division") return self.val desc = """ This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that).. Improve data accesses or interleave them with compute. 
Examples: 1) Data layout re-structuring, 2) Software Prefetches (also through the compiler).."""


# Level-5 BE/Mem node: estimated cycles servicing loads from local-socket DRAM.
class Local_MEM:
    name = "Local_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # (local-DRAM cost minus the no-snoop baseline) per retired local
            # DRAM load, scaled by the fill-buffer factor, over thread clocks.
            self.val = (Mem_Local_DRAM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", 5) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Local_MEM zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance."""


# Level-5 BE/Mem node: estimated cycles servicing loads from remote-socket DRAM.
class Remote_MEM:
    name = "Remote_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Only meaningful on multi-socket parts: gated on the DS flag.
            self.val = (Mem_Remote_DRAM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", 5) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) if DS else 0
            # NOTE(review): bare EV() call below looks like generator output
            # to keep the event scheduled even when DS is 0 -- confirm against
            # the model generator before changing.
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_MEM zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations."""


# Level-5 BE/Mem node: estimated cycles servicing loads hitting a remote
# socket's cache (HitM or forward), including synchronization traffic.
class Remote_Cache:
    name = "Remote_Cache"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM:pp', 'MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore', 'Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Separate costs for modified-hit (HitM) and clean-forward lines,
            # each relative to the no-snoop baseline; DS-gated as above.
            self.val = ((Mem_Remote_HitM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", 5) + (Mem_Remote_Fwd_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", 5)) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) if DS else 0
            # NOTE(review): bare EV() calls presumably keep events scheduled
            # when DS is 0 -- confirm against the generator.
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", 5)
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_Cache zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations."""


# Level-3 BE/Mem node: rough estimate of stalls on persistent-memory (PMM)
# loads, active only in App-Direct mode.
class PMM_Bound:
    name = "PMM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'Server', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # The DDR-hit fraction splits MEM_Bound_Ratio between DRAM and
            # PMM; the OneMillion*loads guard filters statistical noise.
            # PMM_App_Direct is a module global set by the driver -- TODO
            # confirm it is defined for this model.
            self.val = (((1 - Mem_DDR_Hit_Fraction(self, EV, 3)) * MEM_Bound_Ratio(self, EV, 3)) if (OneMillion *(EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", 3) + EV("MEM_LOAD_RETIRED.LOCAL_PMM", 3))> EV("MEM_LOAD_RETIRED.L1_MISS", 3)) else 0) if PMM_App_Direct else 0
            EV("MEM_LOAD_RETIRED.LOCAL_PMM", 3)
            EV("MEM_LOAD_RETIRED.L1_MISS", 3)
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "PMM_Bound zero division")
        return self.val
    desc = """
This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory by loads, PMM stands for Persistent Memory Module. . Consider moving data-structure from AEP to DDR memory for better latency/bandwidth."""


# Level-3 BE/Mem node: stalls attributable to RFO (read-for-ownership) stores.
class Store_Bound:
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_INST_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.BOUND_ON_STORES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls.
This metric will be flagged should RFO stores be a bottleneck."""


# Level-4 child of Store_Bound: cycles spent handling L1D store misses.
class Store_Latency:
    name = "Store_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # L2-hit store cycles plus non-locked outstanding demand-RFO
            # cycles, over thread clocks.
            self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Consider to avoid/reduce unnecessary (or easily load-able/computable) memory store."""


# Level-4 child of Store_Bound: RFO stores that snoop-hit modified lines in
# another core's cache (classic false/true sharing signature).
class False_Sharing:
    name = "False_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "False_Sharing zero division")
        return self.val
    desc = """
This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. . False Sharing can be easily avoided by padding to make Logical Processors access different lines."""


# Level-4 child of Store_Bound: rate of stores split across a cache line.
class Split_Stores:
    name = "Split_Stores"
    domain = "Core_Utilization"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Note: normalized to CORE clocks (per physical core), unlike
            # most siblings which use thread CLKS.
            self.val = EV("MEM_INST_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Stores zero division")
        return self.val
    desc = """
This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity."""


# Level-4 child of Store_Bound: stalls attributed to streaming (non-temporal)
# stores, costed at a fixed 9 clocks per request.
class Streaming_Stores:
    name = "Streaming_Stores"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.STREAMING_WR.ANY_RESPONSE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBW', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 9 * EV("OCR.STREAMING_WR.ANY_RESPONSE", 4) / CLKS(self, EV, 4) if DS else 0
            # NOTE(review): bare EV() call presumably keeps the event
            # scheduled when DS is 0 -- confirm against the generator.
            EV("OCR.STREAMING_WR.ANY_RESPONSE", 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Streaming_Stores zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck."""


# Level-4 child of Store_Bound: cycles handling first-level DTLB store misses
# (STLB hits costed at Mem_STLB_Hit_Cost clocks, plus active page walks).
class DTLB_Store:
    name = "DTLB_Store"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT:c1", 4) + EV("DTLB_STORE_MISSES.WALK_ACTIVE", 4)) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Store zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently- used data."""


# Level-5 child of DTLB_Store: portion that hit the second-level TLB,
# derived as DTLB_Store minus the page-walk portion below.
class Store_STLB_Hit:
    name = "Store_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = self.DTLB_Store.compute(EV) - self.Store_STLB_Miss.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Hit zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second- level TLB (STLB)"""


# Level-5 child of DTLB_Store: portion that missed the STLB and performed a
# hardware page walk.
class Store_STLB_Miss:
    name = "Store_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("DTLB_STORE_MISSES.WALK_ACTIVE", 5) / CORE_CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Miss zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk"""


# Level-2 node: Backend_Bound slots not explained by Memory_Bound, i.e.
# compute/execution-resource limitations.
class Core_Bound:
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        try:
            # Clamped at zero because the two sub-metrics are estimated
            # independently and may cross.
            self.val = max(0 , self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots where Core non- memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).. Tip: consider Port Saturation analysis as next step."""


# Level-3 child of Core_Bound: cycles the divide/sqrt unit was busy.
class Divider:
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['ARITH.DIVIDER_ACTIVE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("ARITH.DIVIDER_ACTIVE", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the Divider unit was active.
Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication."""


# Level-3 child of Core_Bound: issue-pipeline stalls from serializing
# instructions (scoreboard stalls).
class Serializing_Operation:
    name = "Serializing_Operation"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['RESOURCE_STALLS.SCOREBOARD']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("RESOURCE_STALLS.SCOREBOARD", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Serializing_Operation zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU issue- pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out- of-order execution which may limit performance."""


# Level-4 child: stalls from PAUSE instructions, costed at a fixed 37 clocks.
class Slow_Pause:
    name = "Slow_Pause"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['MISC_RETIRED.PAUSE_INST']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 37 * EV("MISC_RETIRED.PAUSE_INST", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Slow_Pause zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions."""


# Level-3 child of Core_Bound: compute limitation from sub-optimal execution
# port utilization (non-divider).
class Ports_Utilization:
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            # Uses the full Core_Bound_Cycles estimate unless divider activity
            # dominates the non-memory stalls, in which case only the
            # few-uops-executed heuristic is counted.
            self.val = Core_Bound_Cycles(self, EV, 3) / CLKS(self, EV, 3) if (EV("ARITH.DIVIDER_ACTIVE", 3)<(EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) - EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3))) else Few_Uops_Executed_Threshold(self, EV, 3) / CLKS(self, EV, 3)
            # NOTE(review): bare EV() calls presumably keep both branches'
            # events scheduled -- confirm against the generator.
            EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3)
            EV("ARITH.DIVIDER_ACTIVE", 3)
            EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.. Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop."""


# Level-4 child: cycles with zero uops executed on any port.
class Ports_Utilized_0:
    name = "Ports_Utilized_0"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.3_PORTS_UTIL:u0x80", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_0 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.. Check assembly view and Appendix C in Optimization Manual to find out instructions with say 5 or more cycles latency.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html"""


# Level-5 node: SSE/AVX transition penalty -- blend uops injected on vector
# width mismatches, as a fraction of all issued uops.
class Mixing_Vectors:
    name = "Mixing_Vectors"
    domain = "Clocks"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("UOPS_ISSUED.VECTOR_WIDTH_MISMATCH", 5) / EV("UOPS_ISSUED.ANY", 5)
            # No parent gating here (unlike most siblings).
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error(self, "Mixing_Vectors zero division")
        return self.val
    desc = """
This metric estimates penalty in terms of percentage of injected blend uops out of all Uops Issued -- the Count Domain. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic."""


# Level-4 child: cycles exactly 1 uop executed across all ports.
class Ports_Utilized_1:
    name = "Ports_Utilized_1"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.1_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.1_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_1 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful."""


# Level-4 child: cycles exactly 2 uops executed across all ports.
class Ports_Utilized_2:
    name = "Ports_Utilized_2"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.2_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.2_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_2 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto- Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop."""


# Level-4 child: cycles 3 or more uops executed across all ports (the
# healthy/high-ILP case).
class Ports_Utilized_3m:
    name = "Ports_Utilized_3m"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['UOPS_EXECUTED.CYCLES_GE_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_EXECUTED.CYCLES_GE_3", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.4) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_3m zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)."""


# Level-5 node: average utilization of the four ALU ports (0, 1, 5, 6),
# normalized to core clocks.
class ALU_Op_Utilization:
    name = "ALU_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_0", 5) + EV("UOPS_DISPATCHED.PORT_1", 5) + EV("UOPS_DISPATCHED.PORT_5", 5) + EV("UOPS_DISPATCHED.PORT_6", 5)) / (4 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.4)
        except ZeroDivisionError:
            handle_error(self, "ALU_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations."""


# Level-6 node: utilization of port 0 (ALU + 2nd branch).
class Port_0:
    name = "Port_0"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_0']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_0", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_0 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ALU and 2nd branch"""


# Level-6 node: utilization of port 1 (ALU).
class Port_1:
    name = "Port_1"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_1']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_1", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_1 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)"""


# Level-6 node: utilization of port 5 (ALU).
class Port_5:
    name = "Port_5"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_5']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_5", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_5 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ALU. See section 'Handling Port 5 Pressure' in Optimization Manual:.
http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html"""


# Level-6 node: utilization of port 6 (primary branch + simple ALU).
class Port_6:
    name = "Port_6"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_6']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_6", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_6 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 Primary Branch and simple ALU"""


# Level-5 node: average utilization of the two load ports (2 and 3; counted
# by one combined event, hence the divide by 2).
class Load_Op_Utilization:
    name = "Load_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_2_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_2_3", 5) / (2 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Load_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations"""


# Level-5 node: average utilization of the store-data (4/9) and store-address
# (7/8) ports -- four ports total, hence the divide by 4.
class Store_Op_Utilization:
    name = "Store_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_7_8']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_4_9", 5) + EV("UOPS_DISPATCHED.PORT_7_8", 5)) / (4 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Store_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations"""


# Top-level (level-1) TMA category: slots doing useful, eventually-retired work.
class Retiring:
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            # topdown_use_fixed is a module global set by the toplev driver
            # (TODO confirm): prefer the fixed PERF_METRICS MSR breakdown
            # when available, else derive from retired uop slots.
            self.val = (EV("PERF_METRICS.RETIRING", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) if topdown_use_fixed else EV("UOPS_RETIRED.SLOTS", 1) / SLOTS(self, EV, 1)
            # Flagged either when high on its own or when Heavy_Operations
            # is flagged (an "or", unlike the usual parent-and gating).
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """
This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. . A high Retiring value for non-vectorized code may be a good hint for programmer to consider vectorizing his code. Doing so essentially lets more computations be done without significantly increasing number of instructions thus improving the performance."""


# Level-2 node: Retiring slots spent on single-uop ("light") instructions,
# computed as Retiring minus Heavy_Operations.
class Light_Operations:
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Note this may undercount due to approximation using indirect events. Focus on techniques that reduce instruction count or result in more efficient instructions generation such as vectorization."""


# Level-3 node: total FP arithmetic fraction (x87 + scalar + vector).
class FP_Arith:
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """
This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting."""


# Level-4 node: approximation of legacy x87 FP usage.
class X87_Use:
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            # Scale Retiring by the executed-uops ratio of x87 uops.
            self.val = self.Retiring.compute(EV) * EV("UOPS_EXECUTED.X87", 4) / EV("UOPS_EXECUTED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """
This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.. Tip: consider compiler flags to generate newer AVX (or SSE) instruction sets; which typically perform better and feature vectors."""


# Level-4 node: scalar FP arithmetic uop fraction of retired slots.
class FP_Scalar:
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting.. Investigate what limits (compiler) generation of vector code."""


# Level-4 node: vector FP arithmetic uop fraction across all vector widths.
class FP_Vector:
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting.. Check if vector width is expected"""


# Level-5 node: 128-bit packed FP uop fraction.
class FP_Vector_128b:
    name = "FP_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_128b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting.. Try to exploit wider vector length"""


# Level-5 node: 256-bit packed FP uop fraction.
class FP_Vector_256b:
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting..
Try to exploit wider vector length"""


# Level-5 node: 512-bit packed FP uop fraction.
class FP_Vector_512b:
    name = "FP_Vector_512b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_512b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting."""


# Level-3 node: light-operation slots that are memory loads/stores,
# apportioned by the retired memory-instruction ratio.
class Memory_Operations:
    name = "Memory_Operations"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("MEM_INST_RETIRED.ANY", 3) / EV("INST_RETIRED.ANY", 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses."""


# Level-3 node: light-operation slots that are retired branches.
class Branch_Instructions:
    name = "Branch_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("BR_INST_RETIRED.ALL_BRANCHES", 3) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was retiring branch instructions."""


# Level-3 node: light uops not classified by any sibling category.
class Other_Light_Ops:
    name = "Other_Light_Ops"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            # Clamped at zero: the sibling sum is estimated and may exceed
            # Light_Operations.
            self.val = max(0 , self.Light_Operations.compute(EV) - Light_Ops_Sum(self, EV, 3))
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Light_Ops zero division")
        return self.val
    desc = """
This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting"""


# Level-4 node: light-operation slots spent retiring NOPs.
class Nop_Instructions:
    name = "Nop_Instructions"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = ['INST_RETIRED.NOP']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.NOP", 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Nop_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body.. Improve Codegen by correctly placing NOPs outside hot sections (e.g. outside loop body)."""


# Level-2 node: slots retiring multi-uop or microcoded ("heavy") operations:
# MS uops plus MITE instructions that decoded into more than one uop
# (DEC0 minus its one-uop-only cmask variant).
class Heavy_Operations:
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Microcode_Sequencer.compute(EV) + self.Retiring.compute(EV) * (EV("UOPS_DECODED.DEC0", 2) - EV("UOPS_DECODED.DEC0:c1", 2)) / EV("IDQ.MITE_UOPS", 2)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. Note this may overcount due to approximation using indirect events"""


# Level-3 node: heavy operations that are plain multi-uop instructions
# (i.e. not microcode), derived by subtraction.
class Few_Uops_Instructions:
    name = "Few_Uops_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Heavy_Operations.compute(EV) - self.Microcode_Sequencer.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Few_Uops_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to four uops. This highly-correlates with the number of uops in such instructions."""


# Level-3 node: slots retiring uops fetched from the Microcode Sequencer.
class Microcode_Sequencer:
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = ['IDQ.MS_UOPS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided.."""


# Level-4 node: slots lost to microcode assists, costed at the fixed
# Avg_Assist_Cost (66 slots, see Constants) per assist.
class Assists:
    name = "Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 4
    htoff = False
    sample = ['ASSISTS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Avg_Assist_Cost * EV("ASSISTS.ANY", 4) / SLOTS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline.
Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases.""" class FP_Assists: name = "FP_Assists" domain = "Slots_Estimated" area = "RET" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['HPC']) maxval = None def compute(self, EV): try: self.val = 34 * EV("ASSISTS.FP", 5) / SLOTS(self, EV, 5) self.thresh = (self.val > 0.1) except ZeroDivisionError: handle_error(self, "FP_Assists zero division") return self.val desc = """ This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).. Consider DAZ (Denormals Are Zero) and/or FTZ (Flush To Zero) options in your compiler; \"-ffast-math\" with -O2 in GCC for example. This option may improve performance if the denormal values are not critical in your application. Also note that the DAZ and FTZ modes are not compatible with the IEEE Standard 754.. https://www.intel.com/content/www/us/en/develop/docume ntation/vtune-help/top/reference/cpu-metrics-reference/bad- speculation-back-end-bound-pipeline-slots/fp-assists.html""" class CISC: name = "CISC" domain = "Slots" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV)) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "CISC zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. 
Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources."""
# NOTE(review): auto-generated Metric_* classes (area "Info.Bottleneck"):
# standalone informative metrics rather than tree nodes.  Unlike the node
# classes above, compute() does not return the value and errors are routed
# to handle_error_metric(); each metric delegates to the like-named
# file-level formula helper.
class Metric_Mispredictions: name = "Mispredictions" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts', 'BvMP']) sibling = None def compute(self, EV): try: self.val = Mispredictions(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Mispredictions zero division") desc = """ Total pipeline cost of Branch Misprediction related bottlenecks"""
class Metric_Big_Code: name = "Big_Code" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvBC', 'BigFootprint', 'Fed', 'Frontend', 'IcMiss', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Big_Code(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Big_Code zero division") desc = """ Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)"""
class Metric_Instruction_Fetch_BW: name = "Instruction_Fetch_BW" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvFB', 'Fed', 'FetchBW', 'Frontend']) sibling = None def compute(self, EV): try: self.val = Instruction_Fetch_BW(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Instruction_Fetch_BW zero division") desc = """ Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)"""
class Metric_Cache_Memory_Bandwidth: name = "Cache_Memory_Bandwidth" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvMB', 'Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Cache_Memory_Bandwidth(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Cache_Memory_Bandwidth zero division") desc = """ Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks"""
class Metric_Cache_Memory_Latency: name = "Cache_Memory_Latency" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvML', 'Mem', 'MemoryLat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Cache_Memory_Latency(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Cache_Memory_Latency zero division") desc = """ Total pipeline cost of external Memory- or Cache-Latency related bottlenecks"""
class Metric_Memory_Data_TLBs: name = "Memory_Data_TLBs" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvMT', 'Mem', 'MemoryTLB', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Memory_Data_TLBs(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Memory_Data_TLBs zero division") desc = """ Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)"""
class Metric_Memory_Synchronization: name = "Memory_Synchronization" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvMS', 'Mem', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Memory_Synchronization(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "Memory_Synchronization zero division") desc = """ Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)"""
class Metric_Compute_Bound_Est: name = "Compute_Bound_Est" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvCB', 'Cor']) sibling = None def compute(self, EV): try: self.val = Compute_Bound_Est(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Compute_Bound_Est zero division") desc = """ Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy."""
class Metric_Irregular_Overhead: name = "Irregular_Overhead" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['Bad', 'BvIO', 'Cor', 'Ret']) sibling = None def compute(self, EV): try: self.val = Irregular_Overhead(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "Irregular_Overhead zero division") desc = """ Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments)"""
class Metric_Other_Bottlenecks: name = "Other_Bottlenecks" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvOB', 'Cor', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Other_Bottlenecks(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Other_Bottlenecks zero division") desc = """ Total pipeline cost of remaining bottlenecks in the back- end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."""
class Metric_Branching_Overhead: name = "Branching_Overhead" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvBO', 'Ret']) sibling = None def compute(self, EV): try: self.val = Branching_Overhead(self, EV, 0) self.thresh = (self.val > 5) except ZeroDivisionError: handle_error_metric(self, "Branching_Overhead zero division") desc = """ Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments.
(A lower bound). Consider Loop Unrolling or function inlining optimizations"""
class Metric_Useful_Work: name = "Useful_Work" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Bottleneck" metricgroup = frozenset(['BvUW', 'Ret']) sibling = None def compute(self, EV): try: self.val = Useful_Work(self, EV, 0) self.thresh = (self.val > 20) except ZeroDivisionError: handle_error_metric(self, "Useful_Work zero division") desc = """ Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead."""
class Metric_Core_Bound_Likely: name = "Core_Bound_Likely" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Botlnk.L0" metricgroup = frozenset(['Cor', 'SMT']) sibling = None def compute(self, EV): try: self.val = Core_Bound_Likely(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Core_Bound_Likely zero division") desc = """ Probability of Core Bound bottleneck hidden by SMT-profiling artifacts.
Tip: consider analysis with SMT disabled"""
# NOTE(review): auto-generated informative metrics for areas "Info.Thread",
# "Info.Core" and "Info.Inst_Mix".  Same shape as the other Metric_* classes:
# compute() delegates to the like-named file-level formula helper, sets
# self.val/self.thresh (no return), and routes errors to handle_error_metric().
# maxval values reference file-level constants (Pipeline_Width, Exe_Ports).
class Metric_IPC: name = "IPC" domain = "Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Ret', 'Summary']) sibling = None def compute(self, EV): try: self.val = IPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IPC zero division") desc = """ Instructions Per Cycle (per Logical Processor)"""
class Metric_UopPI: name = "UopPI" domain = "Metric" maxval = 2.0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = UopPI(self, EV, 0) self.thresh = (self.val > 1.05) except ZeroDivisionError: handle_error_metric(self, "UopPI zero division") desc = """ Uops Per Instruction"""
class Metric_UpTB: name = "UpTB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = UpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 1.5 except ZeroDivisionError: handle_error_metric(self, "UpTB zero division") desc = """ Uops per taken branch"""
class Metric_CPI: name = "CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Mem']) sibling = None def compute(self, EV): try: self.val = CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPI zero division") desc = """ Cycles Per Instruction (per Logical Processor)"""
class Metric_CLKS: name = "CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline']) sibling = None def compute(self, EV): try: self.val = CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS zero division") desc = """ Per-Logical Processor actual clocks when the Logical Processor is active."""
class Metric_SLOTS: name = "SLOTS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['TmaL1']) sibling = None def compute(self, EV): try: self.val = SLOTS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SLOTS zero division") desc = """ Total issue-pipeline slots (per-Physical Core till ICL; per- Logical Processor ICL onward)"""
class Metric_Slots_Utilization: name = "Slots_Utilization" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['SMT', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = Slots_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Slots_Utilization zero division") desc = """ Fraction of Physical Core issue-slots utilized by this Logical Processor"""
class Metric_Execute_per_Issue: name = "Execute_per_Issue" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Cor', 'Pipeline']) sibling = None def compute(self, EV): try: self.val = Execute_per_Issue(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Execute_per_Issue zero division") desc = """ The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions.
Ratio < 1 suggest high rate of \"execute\" at rename stage."""
class Metric_CoreIPC: name = "CoreIPC" domain = "Core_Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Ret', 'SMT', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = CoreIPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CoreIPC zero division") desc = """ Instructions Per Cycle across hyper-threads (per physical core)"""
class Metric_FLOPc: name = "FLOPc" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Ret', 'Flops']) sibling = None def compute(self, EV): try: self.val = FLOPc(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FLOPc zero division") desc = """ Floating Point Operations Per Cycle"""
class Metric_FP_Arith_Utilization: name = "FP_Arith_Utilization" domain = "Core_Metric" maxval = 2.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Cor', 'Flops', 'HPC']) sibling = None def compute(self, EV): try: self.val = FP_Arith_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FP_Arith_Utilization zero division") desc = """ Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width).
Values > 1 are possible due to Fused-Multiply Add FMA counting - common."""
class Metric_ILP: name = "ILP" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Core" metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil']) sibling = None def compute(self, EV): try: self.val = ILP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ILP zero division") desc = """ Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical- processor)"""
class Metric_EPC: name = "EPC" domain = "Metric" maxval = 20.0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = EPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "EPC zero division") desc = """ uops Executed per Cycle"""
class Metric_CORE_CLKS: name = "CORE_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = CORE_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CORE_CLKS zero division") desc = """ Core actual clocks when any Logical Processor is active on the Physical Core"""
class Metric_IpLoad: name = "IpLoad" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpLoad(self, EV, 0) self.thresh = (self.val < 3) except ZeroDivisionError: handle_error_metric(self, "IpLoad zero division") desc = """ Instructions per Load (lower number means higher occurrence rate).
Tip: reduce memory accesses."""
# NOTE(review): auto-generated "Info.Inst_Mix" informative metrics
# (instructions-per-X occurrence rates).  compute() delegates to the
# like-named file-level formula helper; errors go to handle_error_metric().
class Metric_IpStore: name = "IpStore" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpStore(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpStore zero division") desc = """ Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses."""
class Metric_IpBranch: name = "IpBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpBranch(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpBranch zero division") desc = """ Instructions per Branch (lower number means higher occurrence rate)"""
class Metric_IpCall: name = "IpCall" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpCall(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpCall zero division") desc = """ Instructions per (near) call (lower number means higher occurrence rate)"""
class Metric_IpTB: name = "IpTB" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 2 + 1 except ZeroDivisionError: handle_error_metric(self, "IpTB zero division") desc = """ Instructions per taken branch"""
class Metric_BpTkBranch: name = "BpTkBranch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = BpTkBranch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "BpTkBranch zero division") desc = """ Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes."""
class Metric_IpFLOP: name = "IpFLOP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpFLOP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpFLOP zero division") desc = """ Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408"""
class Metric_IpArith: name = "IpArith" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith zero division") desc = """ Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."""
class Metric_IpArith_Scalar_SP: name = "IpArith_Scalar_SP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpScalar', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_Scalar_SP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_Scalar_SP zero division") desc = """ Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double counting."""
class Metric_IpArith_Scalar_DP: name = "IpArith_Scalar_DP" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpScalar', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_Scalar_DP(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_Scalar_DP zero division") desc = """ Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."""
class Metric_IpArith_AVX128: name = "IpArith_AVX128" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX128(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX128 zero division") desc = """ Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."""
class Metric_IpArith_AVX256: name = "IpArith_AVX256" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX256(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX256 zero division") desc = """ Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double counting."""
class Metric_IpArith_AVX512: name = "IpArith_AVX512" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpArith_AVX512(self, EV, 0) self.thresh = (self.val < 10) except ZeroDivisionError: handle_error_metric(self, "IpArith_AVX512 zero division") desc = """ Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."""
class Metric_IpPause: name = "IpPause" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Flops', 'FpVector', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpPause(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpPause zero division") desc = """ Instructions per PAUSE (lower number means higher occurrence rate)"""
class Metric_IpSWPF: name = "IpSWPF" domain = "Inst_Metric" maxval = 1000 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Prefetches']) sibling = None def compute(self, EV): try: self.val = IpSWPF(self, EV, 0) self.thresh = (self.val < 100) except ZeroDivisionError: handle_error_metric(self, "IpSWPF zero division") desc = """ Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)"""
class Metric_Instructions: name = "Instructions" domain = "Count" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Summary', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = Instructions(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Instructions zero division") desc = """ Total number of retired Instructions"""
class Metric_Retire: name = "Retire" domain = "Metric" maxval = 0 errcount = 0 area =
"Info.Pipeline" metricgroup = frozenset(['Pipeline', 'Ret']) sibling = None def compute(self, EV): try: self.val = Retire(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Retire zero division") desc = """ Average number of Uops retired in cycles where at least one uop has retired."""
# NOTE(review): auto-generated "Info.Pipeline" / "Info.Frontend" informative
# metrics; compute() delegates to the like-named file-level formula helper
# and errors go to handle_error_metric().  (The fragment above completes
# Metric_Retire, whose opening is on the previous chunk line.)
class Metric_IpAssist: name = "IpAssist" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = IpAssist(self, EV, 0) self.thresh = (self.val < 100000) except ZeroDivisionError: handle_error_metric(self, "IpAssist zero division") desc = """ Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"""
class Metric_Execute: name = "Execute" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT']) sibling = None def compute(self, EV): try: self.val = Execute(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Execute zero division") desc = """ """
class Metric_Fetch_LSD: name = "Fetch_LSD" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_LSD(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_LSD zero division") desc = """ Average number of uops fetched from LSD per cycle"""
class Metric_Fetch_DSB: name = "Fetch_DSB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_DSB(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_DSB zero division") desc = """ Average number of uops fetched from DSB per cycle"""
class Metric_Fetch_MITE: name = "Fetch_MITE" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_MITE(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_MITE zero division") desc = """ Average number of uops fetched from MITE per cycle"""
class Metric_Fetch_UpC: name = "Fetch_UpC" domain = "Metric" maxval = 6.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = Fetch_UpC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Fetch_UpC zero division") desc = """ Average number of Uops issued by front-end when it issued something"""
class Metric_DSB_Coverage: name = "DSB_Coverage" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSB', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Coverage(self, EV, 0) self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1) except ZeroDivisionError: handle_error_metric(self, "DSB_Coverage zero division") desc = """ Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual.
http://www.intel.com/content/www/us/en/architecture- and-technology/64-ia-32-architectures-optimization- manual.html"""
class Metric_DSB_Switch_Cost: name = "DSB_Switch_Cost" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss']) sibling = None def compute(self, EV): try: self.val = DSB_Switch_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DSB_Switch_Cost zero division") desc = """ Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details."""
class Metric_DSB_Misses: name = "DSB_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = DSB_Misses(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "DSB_Misses zero division") desc = """ Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck."""
class Metric_DSB_Bandwidth: name = "DSB_Bandwidth" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['DSB', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Bandwidth(self, EV, 0) self.thresh = (self.val > 10) except ZeroDivisionError: handle_error_metric(self, "DSB_Bandwidth zero division") desc = """ Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck."""
class Metric_ICache_Miss_Latency: name = "ICache_Miss_Latency" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = ICache_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ICache_Miss_Latency zero division") desc = """ Average Latency for L1 instruction cache misses"""
class Metric_IC_Misses: name =
"IC_Misses" domain = "Scaled_Slots" maxval = 0 errcount = 0 area = "Info.Botlnk.L2" metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss']) sibling = None def compute(self, EV): try: self.val = IC_Misses(self, EV, 0) self.thresh = (self.val > 5) except ZeroDivisionError: handle_error_metric(self, "IC_Misses zero division") desc = """ Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck."""
# NOTE(review): auto-generated "Info.Frontend" / "Info.Bad_Spec" informative
# metrics; compute() delegates to the like-named file-level formula helper
# and errors go to handle_error_metric().  (The fragment above completes
# Metric_IC_Misses, whose opening is on the previous chunk line; the final
# Metric_Cond_NT class is truncated at the end of this chunk.)
class Metric_IpDSB_Miss_Ret: name = "IpDSB_Miss_Ret" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSBmiss', 'Fed']) sibling = None def compute(self, EV): try: self.val = IpDSB_Miss_Ret(self, EV, 0) self.thresh = (self.val < 50) except ZeroDivisionError: handle_error_metric(self, "IpDSB_Miss_Ret zero division") desc = """ Instructions per non-speculative DSB miss (lower number means higher occurrence rate)"""
class Metric_IpUnknown_Branch: name = "IpUnknown_Branch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed']) sibling = None def compute(self, EV): try: self.val = IpUnknown_Branch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpUnknown_Branch zero division") desc = """ Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)"""
class Metric_L2MPKI_Code: name = "L2MPKI_Code" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code zero division") desc = """ L2 cache true code cacheline misses per kilo instruction"""
class Metric_L2MPKI_Code_All: name = "L2MPKI_Code_All" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['IcMiss']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Code_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Code_All zero division") desc = """ L2 cache speculative code cacheline misses per kilo instruction"""
class Metric_IpMispredict: name = "IpMispredict" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)"""
class Metric_IpMisp_Cond_Ntaken: name = "IpMisp_Cond_Ntaken" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Cond_Ntaken(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Cond_Ntaken zero division") desc = """ Instructions per retired Mispredicts for conditional non- taken branches (lower number means higher occurrence rate)."""
class Metric_IpMisp_Cond_Taken: name = "IpMisp_Cond_Taken" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Cond_Taken(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Cond_Taken zero division") desc = """ Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate)."""
class Metric_IpMisp_Ret: name = "IpMisp_Ret" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Ret(self, EV, 0) self.thresh = (self.val < 500) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Ret zero division") desc = """ Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)."""
class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = (self.val < 1000) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc = """ Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)."""
class Metric_Branch_Misprediction_Cost: name = "Branch_Misprediction_Cost" domain = "Core_Metric" maxval = 300 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Branch_Misprediction_Cost(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Misprediction_Cost zero division") desc = """ Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)"""
class Metric_Spec_Clears_Ratio: name = "Spec_Clears_Ratio" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['BrMispredicts']) sibling = None def compute(self, EV): try: self.val = Spec_Clears_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Spec_Clears_Ratio zero division") desc = """ Speculative to Retired ratio of all clears (covering Mispredicts and nukes)"""
class Metric_Cond_NT: name = "Cond_NT" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_NT(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self,
"Cond_NT zero division") desc = """ Fraction of branches that are non-taken conditionals""" class Metric_Cond_TK: name = "Cond_TK" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO']) sibling = None def compute(self, EV): try: self.val = Cond_TK(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cond_TK zero division") desc = """ Fraction of branches that are taken conditionals""" class Metric_CallRet: name = "CallRet" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = CallRet(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CallRet zero division") desc = """ Fraction of branches that are CALL or RET""" class Metric_Jump: name = "Jump" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = Jump(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Jump zero division") desc = """ Fraction of branches that are unconditional (direct or indirect) jumps""" class Metric_Other_Branches: name = "Other_Branches" domain = "Fraction" maxval = 1.0 errcount = 0 area = "Info.Branches" metricgroup = frozenset(['Bad', 'Branches']) sibling = None def compute(self, EV): try: self.val = Other_Branches(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Other_Branches zero division") desc = """ Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)""" class Metric_Load_Miss_Real_Latency: name = "Load_Miss_Real_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat']) sibling = None def compute(self, EV): try: self.val 
= Load_Miss_Real_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Miss_Real_Latency zero division") desc = """ Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)""" class Metric_MLP: name = "MLP" domain = "Metric" maxval = 10.0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MLP zero division") desc = """ Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)""" class Metric_L1MPKI: name = "L1MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI zero division") desc = """ L1 cache true misses per kilo instruction for retired demand loads""" class Metric_L1MPKI_Load: name = "L1MPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI_Load zero division") desc = """ L1 cache true misses per kilo instruction for all demand loads (including speculative)""" class Metric_L2MPKI: name = "L2MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'Backend', 'CacheHits']) sibling = None def compute(self, EV): try: self.val = L2MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI zero division") desc = """ L2 cache true misses per kilo instruction for retired demand loads""" class Metric_L2MPKI_All: name = "L2MPKI_All" domain 
= "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_All(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_All zero division") desc = """ L2 cache misses per kilo instruction for all request types (including speculative)""" class Metric_L2MPKI_Load: name = "L2MPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2MPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_Load zero division") desc = """ L2 cache misses per kilo instruction for all demand loads (including speculative)""" class Metric_L2MPKI_RFO: name = "L2MPKI_RFO" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheMisses', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_RFO(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_RFO zero division") desc = """ Offcore requests (L2 cache miss) per kilo instruction for demand RFOs""" class Metric_L2HPKI_Load: name = "L2HPKI_Load" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L2HPKI_Load(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2HPKI_Load zero division") desc = """ L2 cache hits per kilo instruction for all demand loads (including speculative)""" class Metric_L3MPKI: name = "L3MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = L3MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3MPKI zero division") desc = """ L3 cache true 
misses per kilo instruction for retired demand loads""" class Metric_FB_HPKI: name = "FB_HPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = FB_HPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FB_HPKI zero division") desc = """ Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss- handling entries)""" class Metric_L1D_Cache_Fill_BW: name = "L1D_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW zero division") desc = """ """ class Metric_L2_Cache_Fill_BW: name = "L2_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Fill_BW: name = "L3_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Access_BW: name = "L3_Cache_Access_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW zero 
division") desc = """ """ class Metric_Page_Walks_Utilization: name = "Page_Walks_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Page_Walks_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Page_Walks_Utilization zero division") desc = """ Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses""" class Metric_Code_STLB_MPKI: name = "Code_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Fed', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Code_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Code_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Load_STLB_MPKI: name = "Load_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Load_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)""" class Metric_Store_STLB_MPKI: name = "Store_STLB_MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Store_STLB_MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Store_STLB_MPKI zero division") desc = """ STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete 
the page walk)""" class Metric_L1D_Cache_Fill_BW_2T: name = "L1D_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L1 data cache [GB / sec]""" class Metric_L2_Cache_Fill_BW_2T: name = "L2_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L2 cache [GB / sec]""" class Metric_L3_Cache_Fill_BW_2T: name = "L3_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L3 cache [GB / sec]""" class Metric_L3_Cache_Access_BW_2T: name = "L3_Cache_Access_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Access_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Access_BW_2T zero division") desc = """ Average per-core data access bandwidth to the L3 cache [GB / sec]""" class Metric_L2_Evictions_Silent_PKI: name = "L2_Evictions_Silent_PKI" domain = "Core_Metric" maxval = 0 errcount = 0 area = 
"Info.Memory.Core" metricgroup = frozenset(['L2Evicts', 'Mem', 'Server']) sibling = None def compute(self, EV): try: self.val = L2_Evictions_Silent_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Evictions_Silent_PKI zero division") desc = """ Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)""" class Metric_L2_Evictions_NonSilent_PKI: name = "L2_Evictions_NonSilent_PKI" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['L2Evicts', 'Mem', 'Server']) sibling = None def compute(self, EV): try: self.val = L2_Evictions_NonSilent_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Evictions_NonSilent_PKI zero division") desc = """ Rate of non silent evictions from the L2 cache per Kilo instruction""" class Metric_Load_L2_Miss_Latency: name = "Load_L2_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_Miss_Latency zero division") desc = """ Average Latency for L2 cache miss demand Loads""" class Metric_Load_L3_Miss_Latency: name = "Load_L3_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L3_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L3_Miss_Latency zero division") desc = """ Average Latency for L3 cache miss demand Loads""" class Metric_Load_L2_MLP: name = "Load_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) 
sibling = None def compute(self, EV): try: self.val = Load_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_MLP zero division") desc = """ Average Parallel L2 cache miss demand Loads""" class Metric_Data_L2_MLP: name = "Data_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Data_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Data_L2_MLP zero division") desc = """ Average Parallel L2 cache miss data reads""" class Metric_UC_Load_PKI: name = "UC_Load_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = UC_Load_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "UC_Load_PKI zero division") desc = """ Un-cacheable retired load per kilo instruction""" class Metric_Bus_Lock_PKI: name = "Bus_Lock_PKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory.Mix" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = Bus_Lock_PKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Bus_Lock_PKI zero division") desc = """ \"Bus lock\" per kilo instruction""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "Metric" maxval = 1 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'Summary']) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization (percentage)""" class Metric_CPUs_Utilized: name = "CPUs_Utilized" domain = "Metric" maxval = 300 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): 
try: self.val = CPUs_Utilized(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPUs_Utilized zero division") desc = """ Average number of utilized CPUs""" class Metric_Core_Frequency: name = "Core_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary', 'Power']) sibling = None def compute(self, EV): try: self.val = Core_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Core_Frequency zero division") desc = """ Measured Average Core Frequency for unhalted processors [GHz]""" class Metric_Uncore_Frequency: name = "Uncore_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Uncore_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Uncore_Frequency zero division") desc = """ Measured Average Uncore Frequency for the SoC [GHz]""" class Metric_GFLOPs: name = "GFLOPs" domain = "Metric" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['Cor', 'Flops', 'HPC']) sibling = None def compute(self, EV): try: self.val = GFLOPs(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "GFLOPs zero division") desc = """ Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_Power_License0_Utilization: name = "Power_License0_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Power_License0_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Power_License0_Utilization zero division") desc = """ Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.""" class Metric_Power_License1_Utilization: name = "Power_License1_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Power_License1_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Power_License1_Utilization zero division") desc = """ Fraction of Core cycles where the core was running with power-delivery for license level 1. 
This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.""" class Metric_Power_License2_Utilization: name = "Power_License2_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Power_License2_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Power_License2_Utilization zero division") desc = """ Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions.""" class Metric_SMT_2T_Utilization: name = "SMT_2T_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = SMT_2T_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SMT_2T_Utilization zero division") desc = """ Fraction of cycles where both hardware Logical Processors were active""" class Metric_Kernel_Utilization: name = "Kernel_Utilization" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in the Operating System (OS) Kernel mode""" class Metric_Kernel_CPI: name = "Kernel_CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Kernel_CPI zero division") desc = """ Cycles Per Instruction for the Operating System (OS) Kernel mode""" class Metric_DRAM_BW_Use: name = 
"DRAM_BW_Use" domain = "GB/sec" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = DRAM_BW_Use(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DRAM_BW_Use zero division") desc = """ Average external Memory Bandwidth Use for reads and writes [GB / sec]""" class Metric_MEM_Read_Latency: name = "MEM_Read_Latency" domain = "NanoSeconds" maxval = 1000 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryLat', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Read_Latency zero division") desc = """ Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches.""" class Metric_MEM_Parallel_Reads: name = "MEM_Parallel_Reads" domain = "SystemMetric" maxval = 100 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Parallel_Reads(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Parallel_Reads zero division") desc = """ Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches""" class Metric_MEM_PMM_Read_Latency: name = "MEM_PMM_Read_Latency" domain = "NanoSeconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['MemOffcore', 'MemoryLat', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = MEM_PMM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_PMM_Read_Latency zero division") desc = """ Average latency of data read request to external 3D X-Point memory [in nanoseconds]. 
Accounts for demand loads and L1/L2 data-read prefetches""" class Metric_MEM_DRAM_Read_Latency: name = "MEM_DRAM_Read_Latency" domain = "NanoSeconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['MemOffcore', 'MemoryLat', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = MEM_DRAM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_DRAM_Read_Latency zero division") desc = """ Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data- read prefetches""" class Metric_PMM_Read_BW: name = "PMM_Read_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['MemOffcore', 'MemoryBW', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = PMM_Read_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "PMM_Read_BW zero division") desc = """ Average 3DXP Memory Bandwidth Use for reads [GB / sec]""" class Metric_PMM_Write_BW: name = "PMM_Write_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['MemOffcore', 'MemoryBW', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = PMM_Write_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "PMM_Write_BW zero division") desc = """ Average 3DXP Memory Bandwidth Use for Writes [GB / sec]""" class Metric_IO_Read_BW: name = "IO_Read_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['IoBW', 'MemOffcore', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = IO_Read_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IO_Read_BW zero division") desc = """ Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. 
Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU""" class Metric_IO_Write_BW: name = "IO_Write_BW" domain = "GB/sec" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['IoBW', 'MemOffcore', 'SoC', 'Server']) sibling = None def compute(self, EV): try: self.val = IO_Write_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IO_Write_BW zero division") desc = """ Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU""" class Metric_Time: name = "Time" domain = "Seconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = Time(self, EV, 0) self.thresh = (self.val < 1) except ZeroDivisionError: handle_error_metric(self, "Time zero division") desc = """ Run duration time in seconds""" class Metric_Socket_CLKS: name = "Socket_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Socket_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Socket_CLKS zero division") desc = """ Socket actual clocks when any core is active on that socket""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Branches', 'OS']) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) self.thresh = (self.val < 1000000) except ZeroDivisionError: handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]""" # Schedule class Setup: def __init__(self, r): o = dict() 
n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n n = LCP() ; r.run(n) ; o["LCP"] = n n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n n = MITE() ; r.run(n) ; o["MITE"] = n n = Decoder0_Alone() ; r.run(n) ; o["Decoder0_Alone"] = n n = MITE_4wide() ; r.run(n) ; o["MITE_4wide"] = n n = DSB() ; r.run(n) ; o["DSB"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Other_Mispredicts() ; r.run(n) ; o["Other_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Other_Nukes() ; r.run(n) ; o["Other_Nukes"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n n = Load_STLB_Hit() ; r.run(n) ; o["Load_STLB_Hit"] = n n = Load_STLB_Miss() ; r.run(n) ; o["Load_STLB_Miss"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = L1_Hit_Latency() ; r.run(n) ; o["L1_Hit_Latency"] = n n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n n = FB_Full() ; r.run(n) ; o["FB_Full"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n n = Data_Sharing() ; r.run(n) ; 
o["Data_Sharing"] = n n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n n = Local_MEM() ; r.run(n) ; o["Local_MEM"] = n n = Remote_MEM() ; r.run(n) ; o["Remote_MEM"] = n n = Remote_Cache() ; r.run(n) ; o["Remote_Cache"] = n n = PMM_Bound() ; r.run(n) ; o["PMM_Bound"] = n n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n n = Streaming_Stores() ; r.run(n) ; o["Streaming_Stores"] = n n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n n = Store_STLB_Hit() ; r.run(n) ; o["Store_STLB_Hit"] = n n = Store_STLB_Miss() ; r.run(n) ; o["Store_STLB_Miss"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Divider() ; r.run(n) ; o["Divider"] = n n = Serializing_Operation() ; r.run(n) ; o["Serializing_Operation"] = n n = Slow_Pause() ; r.run(n) ; o["Slow_Pause"] = n n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n n = Mixing_Vectors() ; r.run(n) ; o["Mixing_Vectors"] = n n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n n = Port_0() ; r.run(n) ; o["Port_0"] = n n = Port_1() ; r.run(n) ; o["Port_1"] = n n = Port_5() ; r.run(n) ; o["Port_5"] = n n = Port_6() ; r.run(n) ; o["Port_6"] = n n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n n 
= FP_Arith() ; r.run(n) ; o["FP_Arith"] = n n = X87_Use() ; r.run(n) ; o["X87_Use"] = n n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n n = FP_Vector_512b() ; r.run(n) ; o["FP_Vector_512b"] = n n = Memory_Operations() ; r.run(n) ; o["Memory_Operations"] = n n = Branch_Instructions() ; r.run(n) ; o["Branch_Instructions"] = n n = Other_Light_Ops() ; r.run(n) ; o["Other_Light_Ops"] = n n = Nop_Instructions() ; r.run(n) ; o["Nop_Instructions"] = n n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n n = Few_Uops_Instructions() ; r.run(n) ; o["Few_Uops_Instructions"] = n n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n n = Assists() ; r.run(n) ; o["Assists"] = n n = FP_Assists() ; r.run(n) ; o["FP_Assists"] = n n = CISC() ; r.run(n) ; o["CISC"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Resteers"].parent = o["Fetch_Latency"] o["Mispredicts_Resteers"].parent = o["Branch_Resteers"] o["Clears_Resteers"].parent = o["Branch_Resteers"] o["Unknown_Branches"].parent = o["Branch_Resteers"] o["MS_Switches"].parent = o["Fetch_Latency"] o["LCP"].parent = o["Fetch_Latency"] o["DSB_Switches"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["MITE"].parent = o["Fetch_Bandwidth"] o["Decoder0_Alone"].parent = o["MITE"] o["MITE_4wide"].parent = o["MITE"] o["DSB"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Other_Mispredicts"].parent = o["Branch_Mispredicts"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Other_Nukes"].parent = o["Machine_Clears"] o["Memory_Bound"].parent = o["Backend_Bound"] o["L1_Bound"].parent = o["Memory_Bound"] o["DTLB_Load"].parent = o["L1_Bound"] o["Load_STLB_Hit"].parent = 
o["DTLB_Load"] o["Load_STLB_Miss"].parent = o["DTLB_Load"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["L1_Hit_Latency"].parent = o["L1_Bound"] o["Lock_Latency"].parent = o["L1_Bound"] o["Split_Loads"].parent = o["L1_Bound"] o["G4K_Aliasing"].parent = o["L1_Bound"] o["FB_Full"].parent = o["L1_Bound"] o["L2_Bound"].parent = o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["Contested_Accesses"].parent = o["L3_Bound"] o["Data_Sharing"].parent = o["L3_Bound"] o["L3_Hit_Latency"].parent = o["L3_Bound"] o["SQ_Full"].parent = o["L3_Bound"] o["DRAM_Bound"].parent = o["Memory_Bound"] o["MEM_Bandwidth"].parent = o["DRAM_Bound"] o["MEM_Latency"].parent = o["DRAM_Bound"] o["Local_MEM"].parent = o["MEM_Latency"] o["Remote_MEM"].parent = o["MEM_Latency"] o["Remote_Cache"].parent = o["MEM_Latency"] o["PMM_Bound"].parent = o["Memory_Bound"] o["Store_Bound"].parent = o["Memory_Bound"] o["Store_Latency"].parent = o["Store_Bound"] o["False_Sharing"].parent = o["Store_Bound"] o["Split_Stores"].parent = o["Store_Bound"] o["Streaming_Stores"].parent = o["Store_Bound"] o["DTLB_Store"].parent = o["Store_Bound"] o["Store_STLB_Hit"].parent = o["DTLB_Store"] o["Store_STLB_Miss"].parent = o["DTLB_Store"] o["Core_Bound"].parent = o["Backend_Bound"] o["Divider"].parent = o["Core_Bound"] o["Serializing_Operation"].parent = o["Core_Bound"] o["Slow_Pause"].parent = o["Serializing_Operation"] o["Ports_Utilization"].parent = o["Core_Bound"] o["Ports_Utilized_0"].parent = o["Ports_Utilization"] o["Mixing_Vectors"].parent = o["Ports_Utilized_0"] o["Ports_Utilized_1"].parent = o["Ports_Utilization"] o["Ports_Utilized_2"].parent = o["Ports_Utilization"] o["Ports_Utilized_3m"].parent = o["Ports_Utilization"] o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_0"].parent = o["ALU_Op_Utilization"] o["Port_1"].parent = o["ALU_Op_Utilization"] o["Port_5"].parent = o["ALU_Op_Utilization"] o["Port_6"].parent = o["ALU_Op_Utilization"] o["Load_Op_Utilization"].parent = 
o["Ports_Utilized_3m"] o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Light_Operations"].parent = o["Retiring"] o["FP_Arith"].parent = o["Light_Operations"] o["X87_Use"].parent = o["FP_Arith"] o["FP_Scalar"].parent = o["FP_Arith"] o["FP_Vector"].parent = o["FP_Arith"] o["FP_Vector_128b"].parent = o["FP_Vector"] o["FP_Vector_256b"].parent = o["FP_Vector"] o["FP_Vector_512b"].parent = o["FP_Vector"] o["Memory_Operations"].parent = o["Light_Operations"] o["Branch_Instructions"].parent = o["Light_Operations"] o["Other_Light_Ops"].parent = o["Light_Operations"] o["Nop_Instructions"].parent = o["Other_Light_Ops"] o["Heavy_Operations"].parent = o["Retiring"] o["Few_Uops_Instructions"].parent = o["Heavy_Operations"] o["Microcode_Sequencer"].parent = o["Heavy_Operations"] o["Assists"].parent = o["Microcode_Sequencer"] o["FP_Assists"].parent = o["Assists"] o["CISC"].parent = o["Microcode_Sequencer"] # user visible metrics n = Metric_Mispredictions() ; r.metric(n) ; o["Mispredictions"] = n n = Metric_Big_Code() ; r.metric(n) ; o["Big_Code"] = n n = Metric_Instruction_Fetch_BW() ; r.metric(n) ; o["Instruction_Fetch_BW"] = n n = Metric_Cache_Memory_Bandwidth() ; r.metric(n) ; o["Cache_Memory_Bandwidth"] = n n = Metric_Cache_Memory_Latency() ; r.metric(n) ; o["Cache_Memory_Latency"] = n n = Metric_Memory_Data_TLBs() ; r.metric(n) ; o["Memory_Data_TLBs"] = n n = Metric_Memory_Synchronization() ; r.metric(n) ; o["Memory_Synchronization"] = n n = Metric_Compute_Bound_Est() ; r.metric(n) ; o["Compute_Bound_Est"] = n n = Metric_Irregular_Overhead() ; r.metric(n) ; o["Irregular_Overhead"] = n n = Metric_Other_Bottlenecks() ; r.metric(n) ; o["Other_Bottlenecks"] = n n = Metric_Branching_Overhead() ; r.metric(n) ; o["Branching_Overhead"] = n n = Metric_Useful_Work() ; r.metric(n) ; o["Useful_Work"] = n n = Metric_Core_Bound_Likely() ; r.metric(n) ; o["Core_Bound_Likely"] = n n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n n = 
Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_Slots_Utilization() ; r.metric(n) ; o["Slots_Utilization"] = n n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n n = Metric_EPC() ; r.metric(n) ; o["EPC"] = n n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n n = Metric_IpArith_AVX512() ; r.metric(n) ; o["IpArith_AVX512"] = n n = Metric_IpPause() ; r.metric(n) ; o["IpPause"] = n n = Metric_IpSWPF() ; r.metric(n) ; o["IpSWPF"] = n n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n n = Metric_IpAssist() ; r.metric(n) ; o["IpAssist"] = n n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n n = Metric_Fetch_LSD() ; r.metric(n) ; o["Fetch_LSD"] = n n = Metric_Fetch_DSB() ; r.metric(n) ; o["Fetch_DSB"] = n n = Metric_Fetch_MITE() ; r.metric(n) ; o["Fetch_MITE"] = n n = Metric_Fetch_UpC() ; r.metric(n) ; 
o["Fetch_UpC"] = n n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n n = Metric_DSB_Switch_Cost() ; r.metric(n) ; o["DSB_Switch_Cost"] = n n = Metric_DSB_Misses() ; r.metric(n) ; o["DSB_Misses"] = n n = Metric_DSB_Bandwidth() ; r.metric(n) ; o["DSB_Bandwidth"] = n n = Metric_ICache_Miss_Latency() ; r.metric(n) ; o["ICache_Miss_Latency"] = n n = Metric_IC_Misses() ; r.metric(n) ; o["IC_Misses"] = n n = Metric_IpDSB_Miss_Ret() ; r.metric(n) ; o["IpDSB_Miss_Ret"] = n n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n n = Metric_L2MPKI_Code() ; r.metric(n) ; o["L2MPKI_Code"] = n n = Metric_L2MPKI_Code_All() ; r.metric(n) ; o["L2MPKI_Code_All"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Cond_Ntaken() ; r.metric(n) ; o["IpMisp_Cond_Ntaken"] = n n = Metric_IpMisp_Cond_Taken() ; r.metric(n) ; o["IpMisp_Cond_Taken"] = n n = Metric_IpMisp_Ret() ; r.metric(n) ; o["IpMisp_Ret"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_Branch_Misprediction_Cost() ; r.metric(n) ; o["Branch_Misprediction_Cost"] = n n = Metric_Spec_Clears_Ratio() ; r.metric(n) ; o["Spec_Clears_Ratio"] = n n = Metric_Cond_NT() ; r.metric(n) ; o["Cond_NT"] = n n = Metric_Cond_TK() ; r.metric(n) ; o["Cond_TK"] = n n = Metric_CallRet() ; r.metric(n) ; o["CallRet"] = n n = Metric_Jump() ; r.metric(n) ; o["Jump"] = n n = Metric_Other_Branches() ; r.metric(n) ; o["Other_Branches"] = n n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n n = Metric_L1MPKI_Load() ; r.metric(n) ; o["L1MPKI_Load"] = n n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n n = Metric_L2HPKI_Load() ; r.metric(n) ; 
o["L2HPKI_Load"] = n n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n n = Metric_FB_HPKI() ; r.metric(n) ; o["FB_HPKI"] = n n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n n = Metric_L3_Cache_Access_BW() ; r.metric(n) ; o["L3_Cache_Access_BW"] = n n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n n = Metric_Code_STLB_MPKI() ; r.metric(n) ; o["Code_STLB_MPKI"] = n n = Metric_Load_STLB_MPKI() ; r.metric(n) ; o["Load_STLB_MPKI"] = n n = Metric_Store_STLB_MPKI() ; r.metric(n) ; o["Store_STLB_MPKI"] = n n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Access_BW_2T() ; r.metric(n) ; o["L3_Cache_Access_BW_2T"] = n n = Metric_L2_Evictions_Silent_PKI() ; r.metric(n) ; o["L2_Evictions_Silent_PKI"] = n n = Metric_L2_Evictions_NonSilent_PKI() ; r.metric(n) ; o["L2_Evictions_NonSilent_PKI"] = n n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n n = Metric_Load_L3_Miss_Latency() ; r.metric(n) ; o["Load_L3_Miss_Latency"] = n n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n n = Metric_UC_Load_PKI() ; r.metric(n) ; o["UC_Load_PKI"] = n n = Metric_Bus_Lock_PKI() ; r.metric(n) ; o["Bus_Lock_PKI"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n n = Metric_Uncore_Frequency() ; r.metric(n) ; o["Uncore_Frequency"] = n n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; 
o["Turbo_Utilization"] = n n = Metric_Power_License0_Utilization() ; r.metric(n) ; o["Power_License0_Utilization"] = n n = Metric_Power_License1_Utilization() ; r.metric(n) ; o["Power_License1_Utilization"] = n n = Metric_Power_License2_Utilization() ; r.metric(n) ; o["Power_License2_Utilization"] = n n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n n = Metric_MEM_PMM_Read_Latency() ; r.metric(n) ; o["MEM_PMM_Read_Latency"] = n n = Metric_MEM_DRAM_Read_Latency() ; r.metric(n) ; o["MEM_DRAM_Read_Latency"] = n n = Metric_PMM_Read_BW() ; r.metric(n) ; o["PMM_Read_BW"] = n n = Metric_PMM_Write_BW() ; r.metric(n) ; o["PMM_Write_BW"] = n n = Metric_IO_Read_BW() ; r.metric(n) ; o["IO_Read_BW"] = n n = Metric_IO_Write_BW() ; r.metric(n) ; o["IO_Write_BW"] = n n = Metric_Time() ; r.metric(n) ; o["Time"] = n n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n # references between groups o["Branch_Resteers"].Unknown_Branches = o["Unknown_Branches"] o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["Bad_Speculation"].Retiring = o["Retiring"] o["Bad_Speculation"].Frontend_Bound = o["Frontend_Bound"] o["Bad_Speculation"].Backend_Bound = o["Backend_Bound"] o["Branch_Mispredicts"].Retiring = o["Retiring"] o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Branch_Mispredicts"].Frontend_Bound = o["Frontend_Bound"] o["Branch_Mispredicts"].Backend_Bound = o["Backend_Bound"] o["Other_Mispredicts"].Retiring = o["Retiring"] o["Other_Mispredicts"].Backend_Bound = 
o["Backend_Bound"] o["Other_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Other_Mispredicts"].Frontend_Bound = o["Frontend_Bound"] o["Other_Mispredicts"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Machine_Clears"].Retiring = o["Retiring"] o["Machine_Clears"].Frontend_Bound = o["Frontend_Bound"] o["Machine_Clears"].Backend_Bound = o["Backend_Bound"] o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"] o["Other_Nukes"].Machine_Clears = o["Machine_Clears"] o["Other_Nukes"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Nukes"].Retiring = o["Retiring"] o["Other_Nukes"].Backend_Bound = o["Backend_Bound"] o["Other_Nukes"].Bad_Speculation = o["Bad_Speculation"] o["Other_Nukes"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Retiring = o["Retiring"] o["Memory_Bound"].Backend_Bound = o["Backend_Bound"] o["Load_STLB_Hit"].Load_STLB_Miss = o["Load_STLB_Miss"] o["Load_STLB_Hit"].DTLB_Load = o["DTLB_Load"] o["DRAM_Bound"].L2_Bound = o["L2_Bound"] o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["PMM_Bound"].L2_Bound = o["L2_Bound"] o["Store_STLB_Hit"].DTLB_Store = o["DTLB_Store"] o["Store_STLB_Hit"].Store_STLB_Miss = o["Store_STLB_Miss"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Retiring = o["Retiring"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Ports_Utilization"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Ports_Utilization"].Retiring = o["Retiring"] o["Retiring"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Retiring = o["Retiring"] o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] o["FP_Arith"].Retiring = o["Retiring"] o["FP_Arith"].FP_Scalar = o["FP_Scalar"] o["FP_Arith"].X87_Use = o["X87_Use"] o["FP_Arith"].FP_Vector = o["FP_Vector"] o["X87_Use"].Retiring = o["Retiring"] o["FP_Scalar"].Retiring = o["Retiring"] o["FP_Vector"].Retiring = 
o["Retiring"] o["FP_Vector_128b"].Retiring = o["Retiring"] o["FP_Vector_256b"].Retiring = o["Retiring"] o["FP_Vector_512b"].Retiring = o["Retiring"] o["Memory_Operations"].Retiring = o["Retiring"] o["Memory_Operations"].Light_Operations = o["Light_Operations"] o["Memory_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Memory_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Branch_Instructions"].Retiring = o["Retiring"] o["Branch_Instructions"].Light_Operations = o["Light_Operations"] o["Branch_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Branch_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Other_Light_Ops"].Light_Operations = o["Light_Operations"] o["Other_Light_Ops"].Retiring = o["Retiring"] o["Other_Light_Ops"].FP_Arith = o["FP_Arith"] o["Other_Light_Ops"].Heavy_Operations = o["Heavy_Operations"] o["Other_Light_Ops"].FP_Vector = o["FP_Vector"] o["Other_Light_Ops"].FP_Scalar = o["FP_Scalar"] o["Other_Light_Ops"].Branch_Instructions = o["Branch_Instructions"] o["Other_Light_Ops"].X87_Use = o["X87_Use"] o["Other_Light_Ops"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Other_Light_Ops"].Memory_Operations = o["Memory_Operations"] o["Nop_Instructions"].Retiring = o["Retiring"] o["Nop_Instructions"].Light_Operations = o["Light_Operations"] o["Nop_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Nop_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Heavy_Operations"].Retiring = o["Retiring"] o["Heavy_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Few_Uops_Instructions"].Retiring = o["Retiring"] o["Few_Uops_Instructions"].Heavy_Operations = o["Heavy_Operations"] o["Few_Uops_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Assists = o["Assists"] o["Mispredictions"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Mispredictions"].LCP = o["LCP"] o["Mispredictions"].Retiring = o["Retiring"] 
o["Mispredictions"].Other_Mispredicts = o["Other_Mispredicts"] o["Mispredictions"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Mispredictions"].Frontend_Bound = o["Frontend_Bound"] o["Mispredictions"].DSB_Switches = o["DSB_Switches"] o["Mispredictions"].Backend_Bound = o["Backend_Bound"] o["Mispredictions"].Branch_Resteers = o["Branch_Resteers"] o["Mispredictions"].ICache_Misses = o["ICache_Misses"] o["Mispredictions"].MS_Switches = o["MS_Switches"] o["Mispredictions"].Bad_Speculation = o["Bad_Speculation"] o["Mispredictions"].ITLB_Misses = o["ITLB_Misses"] o["Mispredictions"].Unknown_Branches = o["Unknown_Branches"] o["Mispredictions"].Fetch_Latency = o["Fetch_Latency"] o["Mispredictions"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Big_Code"].LCP = o["LCP"] o["Big_Code"].ICache_Misses = o["ICache_Misses"] o["Big_Code"].DSB_Switches = o["DSB_Switches"] o["Big_Code"].Branch_Resteers = o["Branch_Resteers"] o["Big_Code"].MS_Switches = o["MS_Switches"] o["Big_Code"].ITLB_Misses = o["ITLB_Misses"] o["Big_Code"].Unknown_Branches = o["Unknown_Branches"] o["Big_Code"].Fetch_Latency = o["Fetch_Latency"] o["Instruction_Fetch_BW"].Retiring = o["Retiring"] o["Instruction_Fetch_BW"].Other_Mispredicts = o["Other_Mispredicts"] o["Instruction_Fetch_BW"].DSB_Switches = o["DSB_Switches"] o["Instruction_Fetch_BW"].Assists = o["Assists"] o["Instruction_Fetch_BW"].Backend_Bound = o["Backend_Bound"] o["Instruction_Fetch_BW"].Branch_Resteers = o["Branch_Resteers"] o["Instruction_Fetch_BW"].Heavy_Operations = o["Heavy_Operations"] o["Instruction_Fetch_BW"].Fetch_Latency = o["Fetch_Latency"] o["Instruction_Fetch_BW"].ICache_Misses = o["ICache_Misses"] o["Instruction_Fetch_BW"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Instruction_Fetch_BW"].Frontend_Bound = o["Frontend_Bound"] o["Instruction_Fetch_BW"].Bad_Speculation = o["Bad_Speculation"] o["Instruction_Fetch_BW"].ITLB_Misses = o["ITLB_Misses"] o["Instruction_Fetch_BW"].Mispredicts_Resteers = 
o["Mispredicts_Resteers"] o["Instruction_Fetch_BW"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Instruction_Fetch_BW"].LCP = o["LCP"] o["Instruction_Fetch_BW"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Instruction_Fetch_BW"].Clears_Resteers = o["Clears_Resteers"] o["Instruction_Fetch_BW"].MS_Switches = o["MS_Switches"] o["Instruction_Fetch_BW"].Unknown_Branches = o["Unknown_Branches"] o["Cache_Memory_Bandwidth"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Bandwidth"].G4K_Aliasing = o["G4K_Aliasing"] o["Cache_Memory_Bandwidth"].Retiring = o["Retiring"] o["Cache_Memory_Bandwidth"].PMM_Bound = o["PMM_Bound"] o["Cache_Memory_Bandwidth"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Bandwidth"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Bandwidth"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Bandwidth"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Bandwidth"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Bandwidth"].Backend_Bound = o["Backend_Bound"] o["Cache_Memory_Bandwidth"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Bandwidth"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Bandwidth"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Bandwidth"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Bandwidth"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Bandwidth"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Bandwidth"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Bandwidth"].FB_Full = o["FB_Full"] o["Cache_Memory_Bandwidth"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Cache_Memory_Bandwidth"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Bandwidth"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Bandwidth"].DRAM_Bound = o["DRAM_Bound"] o["Cache_Memory_Latency"].L1_Bound = o["L1_Bound"] o["Cache_Memory_Latency"].DTLB_Load = o["DTLB_Load"] o["Cache_Memory_Latency"].False_Sharing = o["False_Sharing"] o["Cache_Memory_Latency"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Cache_Memory_Latency"].Retiring = o["Retiring"] 
o["Cache_Memory_Latency"].PMM_Bound = o["PMM_Bound"] o["Cache_Memory_Latency"].Data_Sharing = o["Data_Sharing"] o["Cache_Memory_Latency"].L2_Bound = o["L2_Bound"] o["Cache_Memory_Latency"].Memory_Bound = o["Memory_Bound"] o["Cache_Memory_Latency"].SQ_Full = o["SQ_Full"] o["Cache_Memory_Latency"].Store_Bound = o["Store_Bound"] o["Cache_Memory_Latency"].Split_Loads = o["Split_Loads"] o["Cache_Memory_Latency"].L3_Bound = o["L3_Bound"] o["Cache_Memory_Latency"].FB_Full = o["FB_Full"] o["Cache_Memory_Latency"].Streaming_Stores = o["Streaming_Stores"] o["Cache_Memory_Latency"].Contested_Accesses = o["Contested_Accesses"] o["Cache_Memory_Latency"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Cache_Memory_Latency"].DTLB_Store = o["DTLB_Store"] o["Cache_Memory_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Cache_Memory_Latency"].Store_Latency = o["Store_Latency"] o["Cache_Memory_Latency"].Split_Stores = o["Split_Stores"] o["Cache_Memory_Latency"].G4K_Aliasing = o["G4K_Aliasing"] o["Cache_Memory_Latency"].Lock_Latency = o["Lock_Latency"] o["Cache_Memory_Latency"].MEM_Latency = o["MEM_Latency"] o["Cache_Memory_Latency"].Backend_Bound = o["Backend_Bound"] o["Cache_Memory_Latency"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Cache_Memory_Latency"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Data_TLBs"].L1_Bound = o["L1_Bound"] o["Memory_Data_TLBs"].DTLB_Load = o["DTLB_Load"] o["Memory_Data_TLBs"].False_Sharing = o["False_Sharing"] o["Memory_Data_TLBs"].G4K_Aliasing = o["G4K_Aliasing"] o["Memory_Data_TLBs"].Retiring = o["Retiring"] o["Memory_Data_TLBs"].PMM_Bound = o["PMM_Bound"] o["Memory_Data_TLBs"].DTLB_Store = o["DTLB_Store"] o["Memory_Data_TLBs"].L2_Bound = o["L2_Bound"] o["Memory_Data_TLBs"].Memory_Bound = o["Memory_Bound"] o["Memory_Data_TLBs"].Store_Bound = o["Store_Bound"] o["Memory_Data_TLBs"].Split_Loads = o["Split_Loads"] o["Memory_Data_TLBs"].L3_Bound = o["L3_Bound"] o["Memory_Data_TLBs"].FB_Full = o["FB_Full"] o["Memory_Data_TLBs"].Streaming_Stores = o["Streaming_Stores"] 
o["Memory_Data_TLBs"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Memory_Data_TLBs"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Memory_Data_TLBs"].Store_Latency = o["Store_Latency"] o["Memory_Data_TLBs"].Split_Stores = o["Split_Stores"] o["Memory_Data_TLBs"].Lock_Latency = o["Lock_Latency"] o["Memory_Data_TLBs"].Backend_Bound = o["Backend_Bound"] o["Memory_Data_TLBs"].DRAM_Bound = o["DRAM_Bound"] o["Memory_Synchronization"].L1_Bound = o["L1_Bound"] o["Memory_Synchronization"].False_Sharing = o["False_Sharing"] o["Memory_Synchronization"].Retiring = o["Retiring"] o["Memory_Synchronization"].PMM_Bound = o["PMM_Bound"] o["Memory_Synchronization"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Synchronization"].Machine_Clears = o["Machine_Clears"] o["Memory_Synchronization"].Data_Sharing = o["Data_Sharing"] o["Memory_Synchronization"].Memory_Bound = o["Memory_Bound"] o["Memory_Synchronization"].SQ_Full = o["SQ_Full"] o["Memory_Synchronization"].Store_Bound = o["Store_Bound"] o["Memory_Synchronization"].L3_Bound = o["L3_Bound"] o["Memory_Synchronization"].L2_Bound = o["L2_Bound"] o["Memory_Synchronization"].Streaming_Stores = o["Streaming_Stores"] o["Memory_Synchronization"].Contested_Accesses = o["Contested_Accesses"] o["Memory_Synchronization"].DTLB_Store = o["DTLB_Store"] o["Memory_Synchronization"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Memory_Synchronization"].Store_Latency = o["Store_Latency"] o["Memory_Synchronization"].Split_Stores = o["Split_Stores"] o["Memory_Synchronization"].Backend_Bound = o["Backend_Bound"] o["Memory_Synchronization"].Bad_Speculation = o["Bad_Speculation"] o["Memory_Synchronization"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Memory_Synchronization"].Other_Nukes = o["Other_Nukes"] o["Memory_Synchronization"].DRAM_Bound = o["DRAM_Bound"] o["Compute_Bound_Est"].Serializing_Operation = o["Serializing_Operation"] o["Compute_Bound_Est"].Ports_Utilization = o["Ports_Utilization"] o["Compute_Bound_Est"].Retiring = o["Retiring"] 
o["Compute_Bound_Est"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Compute_Bound_Est"].Memory_Bound = o["Memory_Bound"] o["Compute_Bound_Est"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Compute_Bound_Est"].Core_Bound = o["Core_Bound"] o["Compute_Bound_Est"].Backend_Bound = o["Backend_Bound"] o["Compute_Bound_Est"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Compute_Bound_Est"].Divider = o["Divider"] o["Compute_Bound_Est"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Ports_Utilization = o["Ports_Utilization"] o["Irregular_Overhead"].Retiring = o["Retiring"] o["Irregular_Overhead"].ICache_Misses = o["ICache_Misses"] o["Irregular_Overhead"].Heavy_Operations = o["Heavy_Operations"] o["Irregular_Overhead"].Frontend_Bound = o["Frontend_Bound"] o["Irregular_Overhead"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Irregular_Overhead"].Core_Bound = o["Core_Bound"] o["Irregular_Overhead"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Irregular_Overhead"].Bad_Speculation = o["Bad_Speculation"] o["Irregular_Overhead"].ITLB_Misses = o["ITLB_Misses"] o["Irregular_Overhead"].Divider = o["Divider"] o["Irregular_Overhead"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Irregular_Overhead"].Serializing_Operation = o["Serializing_Operation"] o["Irregular_Overhead"].Machine_Clears = o["Machine_Clears"] o["Irregular_Overhead"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Irregular_Overhead"].LCP = o["LCP"] o["Irregular_Overhead"].Other_Mispredicts = o["Other_Mispredicts"] o["Irregular_Overhead"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Irregular_Overhead"].DSB_Switches = o["DSB_Switches"] o["Irregular_Overhead"].Memory_Bound = o["Memory_Bound"] o["Irregular_Overhead"].Assists = o["Assists"] o["Irregular_Overhead"].Backend_Bound = o["Backend_Bound"] o["Irregular_Overhead"].Branch_Resteers = o["Branch_Resteers"] o["Irregular_Overhead"].Clears_Resteers = o["Clears_Resteers"] o["Irregular_Overhead"].MS_Switches = o["MS_Switches"] 
o["Irregular_Overhead"].Other_Nukes = o["Other_Nukes"] o["Irregular_Overhead"].Unknown_Branches = o["Unknown_Branches"] o["Irregular_Overhead"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].Retiring = o["Retiring"] o["Other_Bottlenecks"].Data_Sharing = o["Data_Sharing"] o["Other_Bottlenecks"].L2_Bound = o["L2_Bound"] o["Other_Bottlenecks"].Contested_Accesses = o["Contested_Accesses"] o["Other_Bottlenecks"].L3_Bound = o["L3_Bound"] o["Other_Bottlenecks"].Machine_Clears = o["Machine_Clears"] o["Other_Bottlenecks"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Other_Bottlenecks"].Store_Latency = o["Store_Latency"] o["Other_Bottlenecks"].Other_Mispredicts = o["Other_Mispredicts"] o["Other_Bottlenecks"].DSB_Switches = o["DSB_Switches"] o["Other_Bottlenecks"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Other_Bottlenecks"].Ports_Utilized_1 = o["Ports_Utilized_1"] o["Other_Bottlenecks"].Ports_Utilized_2 = o["Ports_Utilized_2"] o["Other_Bottlenecks"].DTLB_Load = o["DTLB_Load"] o["Other_Bottlenecks"].ICache_Misses = o["ICache_Misses"] o["Other_Bottlenecks"].Streaming_Stores = o["Streaming_Stores"] o["Other_Bottlenecks"].Memory_Bound = o["Memory_Bound"] o["Other_Bottlenecks"].SQ_Full = o["SQ_Full"] o["Other_Bottlenecks"].Store_Bound = o["Store_Bound"] o["Other_Bottlenecks"].Bad_Speculation = o["Bad_Speculation"] o["Other_Bottlenecks"].FB_Full = o["FB_Full"] o["Other_Bottlenecks"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Other_Bottlenecks"].Store_Fwd_Blk = o["Store_Fwd_Blk"] o["Other_Bottlenecks"].Split_Stores = o["Split_Stores"] o["Other_Bottlenecks"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Other_Bottlenecks"].Other_Nukes = o["Other_Nukes"] o["Other_Bottlenecks"].Unknown_Branches = o["Unknown_Branches"] o["Other_Bottlenecks"].DRAM_Bound = o["DRAM_Bound"] o["Other_Bottlenecks"].L1_Bound = o["L1_Bound"] o["Other_Bottlenecks"].G4K_Aliasing = o["G4K_Aliasing"] o["Other_Bottlenecks"].PMM_Bound = o["PMM_Bound"] o["Other_Bottlenecks"].Core_Bound = 
o["Core_Bound"] o["Other_Bottlenecks"].Divider = o["Divider"] o["Other_Bottlenecks"].L1_Hit_Latency = o["L1_Hit_Latency"] o["Other_Bottlenecks"].Assists = o["Assists"] o["Other_Bottlenecks"].Backend_Bound = o["Backend_Bound"] o["Other_Bottlenecks"].Branch_Resteers = o["Branch_Resteers"] o["Other_Bottlenecks"].L3_Hit_Latency = o["L3_Hit_Latency"] o["Other_Bottlenecks"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Other_Bottlenecks"].Fetch_Latency = o["Fetch_Latency"] o["Other_Bottlenecks"].Ports_Utilization = o["Ports_Utilization"] o["Other_Bottlenecks"].False_Sharing = o["False_Sharing"] o["Other_Bottlenecks"].Heavy_Operations = o["Heavy_Operations"] o["Other_Bottlenecks"].Frontend_Bound = o["Frontend_Bound"] o["Other_Bottlenecks"].Serializing_Operation = o["Serializing_Operation"] o["Other_Bottlenecks"].MEM_Latency = o["MEM_Latency"] o["Other_Bottlenecks"].Split_Loads = o["Split_Loads"] o["Other_Bottlenecks"].ITLB_Misses = o["ITLB_Misses"] o["Other_Bottlenecks"].DTLB_Store = o["DTLB_Store"] o["Other_Bottlenecks"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Other_Bottlenecks"].LCP = o["LCP"] o["Other_Bottlenecks"].Lock_Latency = o["Lock_Latency"] o["Other_Bottlenecks"].Clears_Resteers = o["Clears_Resteers"] o["Other_Bottlenecks"].MS_Switches = o["MS_Switches"] o["Other_Bottlenecks"].Ports_Utilized_3m = o["Ports_Utilized_3m"] o["Useful_Work"].Assists = o["Assists"] o["Useful_Work"].Retiring = o["Retiring"] o["Useful_Work"].Heavy_Operations = o["Heavy_Operations"] o["Useful_Work"].Few_Uops_Instructions = o["Few_Uops_Instructions"] o["Useful_Work"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Core_Bound_Likely"].Memory_Bound = o["Memory_Bound"] o["Core_Bound_Likely"].Ports_Utilized_0 = o["Ports_Utilized_0"] o["Core_Bound_Likely"].Core_Bound = o["Core_Bound"] o["Core_Bound_Likely"].Backend_Bound = o["Backend_Bound"] o["Core_Bound_Likely"].Retiring = o["Retiring"] o["Core_Bound_Likely"].Ports_Utilization = o["Ports_Utilization"] o["UopPI"].Retiring = 
o["Retiring"] o["UpTB"].Retiring = o["Retiring"] o["Retire"].Retiring = o["Retiring"] o["DSB_Misses"].MITE = o["MITE"] o["DSB_Misses"].LCP = o["LCP"] o["DSB_Misses"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Misses"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Misses"].DSB_Switches = o["DSB_Switches"] o["DSB_Misses"].Branch_Resteers = o["Branch_Resteers"] o["DSB_Misses"].ICache_Misses = o["ICache_Misses"] o["DSB_Misses"].MS_Switches = o["MS_Switches"] o["DSB_Misses"].ITLB_Misses = o["ITLB_Misses"] o["DSB_Misses"].DSB = o["DSB"] o["DSB_Misses"].Unknown_Branches = o["Unknown_Branches"] o["DSB_Misses"].Fetch_Latency = o["Fetch_Latency"] o["DSB_Bandwidth"].Fetch_Bandwidth = o["Fetch_Bandwidth"] o["DSB_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["DSB_Bandwidth"].DSB = o["DSB"] o["DSB_Bandwidth"].MITE = o["MITE"] o["DSB_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].Fetch_Latency = o["Fetch_Latency"] o["IC_Misses"].LCP = o["LCP"] o["IC_Misses"].MS_Switches = o["MS_Switches"] o["IC_Misses"].ICache_Misses = o["ICache_Misses"] o["IC_Misses"].ITLB_Misses = o["ITLB_Misses"] o["IC_Misses"].Unknown_Branches = o["Unknown_Branches"] o["IC_Misses"].DSB_Switches = o["DSB_Switches"] o["IC_Misses"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].Retiring = o["Retiring"] o["Branch_Misprediction_Cost"].ICache_Misses = o["ICache_Misses"] o["Branch_Misprediction_Cost"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Branch_Misprediction_Cost"].Frontend_Bound = o["Frontend_Bound"] o["Branch_Misprediction_Cost"].Bad_Speculation = o["Bad_Speculation"] o["Branch_Misprediction_Cost"].ITLB_Misses = o["ITLB_Misses"] o["Branch_Misprediction_Cost"].Mispredicts_Resteers = o["Mispredicts_Resteers"] o["Branch_Misprediction_Cost"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Branch_Misprediction_Cost"].LCP = o["LCP"] o["Branch_Misprediction_Cost"].Other_Mispredicts = o["Other_Mispredicts"] o["Branch_Misprediction_Cost"].DSB_Switches = 
o["DSB_Switches"] o["Branch_Misprediction_Cost"].Backend_Bound = o["Backend_Bound"] o["Branch_Misprediction_Cost"].Branch_Resteers = o["Branch_Resteers"] o["Branch_Misprediction_Cost"].MS_Switches = o["MS_Switches"] o["Branch_Misprediction_Cost"].Unknown_Branches = o["Unknown_Branches"] o["Branch_Misprediction_Cost"].Fetch_Latency = o["Fetch_Latency"] # siblings cross-tree o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],) o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],) o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Serializing_Operation"], o["Mixing_Vectors"], o["Microcode_Sequencer"],) o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],) o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],) o["Decoder0_Alone"].sibling = (o["Few_Uops_Instructions"],) o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],) o["Machine_Clears"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"], o["Microcode_Sequencer"],) o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],) o["DTLB_Load"].sibling = (o["DTLB_Store"],) o["Lock_Latency"].sibling = (o["Store_Latency"],) o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"], o["Streaming_Stores"],) o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"],) o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Remote_Cache"], o["False_Sharing"],) o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],) o["L3_Hit_Latency"].overlap = True o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],) o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],) 
o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],) o["Remote_Cache"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"],) o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],) o["Store_Latency"].overlap = True o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"],) o["Streaming_Stores"].sibling = (o["FB_Full"],) o["DTLB_Store"].sibling = (o["DTLB_Load"],) o["Serializing_Operation"].sibling = (o["MS_Switches"],) o["Mixing_Vectors"].sibling = (o["MS_Switches"],) o["Ports_Utilized_1"].sibling = (o["L1_Bound"],) o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], 
o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"], o["FP_Vector_512b"],) o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_512b"],) o["FP_Vector_512b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["Few_Uops_Instructions"].sibling = (o["Decoder0_Alone"],) o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],) o["Mispredictions"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["Cache_Memory_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],) o["Cache_Memory_Latency"].sibling = (o["L3_Hit_Latency"], o["MEM_Latency"],) o["Memory_Data_TLBs"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Memory_Synchronization"].sibling = (o["DTLB_Load"], o["DTLB_Store"],) o["Irregular_Overhead"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],) o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Misses"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["Branch_Misprediction_Cost"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],) o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
254,370
Python
.py
5,811
37.822406
1,942
0.65704
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,918
csv_formats.py
andikleen_pmu-tools/csv_formats.py
# distinguish the bewildering variety of perf/toplev CSV formats from __future__ import print_function import sys import re from collections import namedtuple def is_val(n): return re.match(r'-?[0-9.]+%?|<.*>', n) is not None def is_cpu(n): return re.match(r'(CPU)|(S\d+(-C\d+)?)|C\d+|all', n) is not None def is_socket(n): return re.match(r'S\d+', n) is not None def is_event(n): return re.match(r'[a-zA-Z.-]+', n) is not None def is_number(n): return re.match(r'\s*[0-9]+', n) is not None def is_ts(n): return re.match(r'\s*[0-9.]+', n) is not None or n == "SUMMARY" def is_unit(n): return re.match(r'(% )?[a-zA-Z]*( <)?', n) is not None def is_running(n): return is_number(n) def is_enabled(n): return is_number(n) formats = ( # 0.100997872;CPU0;4612809;;inst_retired_any_0;3491526;2.88 new perf (is_ts, is_cpu, is_val, is_unit, is_event, is_enabled, is_running), # 1.354075473,0,cpu-migrations old perf w/o cpu (is_ts, is_val, is_event), # 1.354075473,CPU0,0,cpu-migrations old perf w/ cpu (is_ts, is_cpu, is_val, is_event), # 0.799553738,137765150,,branches new perf with unit (is_ts, is_val, is_unit, is_event), # 0.799553738,CPU1,137765150,,branches new perf with unit and cpu (is_ts, is_cpu, is_val, is_unit, is_event), # 0.100879059,402.603109,,task-clock,402596410,100.00 new perf with unit without cpu and stats (is_ts, is_val, is_unit, is_event, is_running, is_enabled), # 1.001131873,S0,Backend_Bound.Memory_Bound,13.3,% Slots <,,,0.0,3.0,, # 0.200584389,0,FrontendBound.Branch Resteers,15.87%,above,"", toplev w/ cpu (is_ts, is_cpu, is_event, is_val, is_unit), # 1.001365014,CPU2,1819888,,instructions,93286388,100.00 new perf w/ unit w/ cpu and stats (is_ts, is_cpu, is_val, is_unit, is_event, is_running, is_enabled), # 0.609113353,S0,4,405.454531,,task-clock,405454468,100.00 perf --per-socket with cores (is_ts, is_socket, is_number, is_val, is_unit, is_event, is_running, is_enabled), # 0.806231582,S0,4,812751,,instructions older perf --per-socket w/ cores w/o stats (is_ts, 
is_socket, is_number, is_val, is_unit, is_event), # 0.936482669,C1-T0,Frontend_Bound.Frontend_Latency.ITLB_Misses,0.39,%below,,itlb_misses.walk_completed,, # 0.301553743,C1,Retiring,31.81,%,,,, # 0.200584389,FrontendBound.Branch Resteers,15.87%,above,"", toplev single thread (is_ts, is_event, is_val), ) fmtmaps = { is_ts: 0, is_cpu: 1, is_event: 2, is_val: 3, is_enabled: 4, is_running: 5, is_unit: 6 } Row = namedtuple('Row', ['ts', 'cpu', 'ev', 'val', 'enabled', 'running', 'unit']) def check_format(fmt, row): if all([x(n.strip()) for (x, n) in zip(fmt, row)]): vals = [None] * 7 for i, j in enumerate(fmt): if j in fmtmaps: vals[fmtmaps[j]] = row[i] r = Row._make(vals) return r return False fmt_cache = formats[0] def parse_csv_row(row, error_exit=False): if len(row) == 0: return None global fmt_cache r = check_format(fmt_cache, row) if r: return r for fmt in formats: r = check_format(fmt, row) if r: fmt_cache = fmt return r if row[0].startswith("#"): # comment return None if ".csv" in row[0]: # fake-perf output return None if "Timestamp" in row[0]: return None print("PARSE-ERROR", row, file=sys.stderr) if error_exit: sys.exit(1) return None if __name__ == '__main__': def check(l, fields): n = l.split(",") r = parse_csv_row(n) assert r is not None rd = r._asdict() for a, v in fields.items(): assert rd[a] == n[v] check('1.001131873,S0,Backend_Bound.Memory_Bound,13.3,% Slots <,,,0.0,3.0,,', { "ts": 0, "cpu": 1, "ev": 2, "val": 3, "unit": 4 })
4,094
Python
.py
106
33.009434
105
0.585747
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,919
interval-plot.py
andikleen_pmu-tools/interval-plot.py
#!/usr/bin/env python3 # plot interval CSV output from perf/toplev # perf stat -I1000 -x, -o file ... # toplev -I1000 -x, -o file ... # interval-plot.py file (or stdin) # delimeter must be , # this is for data that is not normalized # TODO: move legend somewhere else where it doesn't overlap? from __future__ import print_function import os import csv import sys import collections import argparse import re import matplotlib if os.getenv("DISPLAY") is None: matplotlib.use('Agg') import matplotlib.pyplot as plt import csv_formats import gen_level import tl_io p = argparse.ArgumentParser( usage='plot interval CSV output from perf stat/toplev', description=''' perf stat -I1000 -x, -o file ... toplev -I1000 -x, -o file ... interval-plot.py file (or stdin) delimeter must be , this is for data that is not normalized.''') p.add_argument('--xkcd', action='store_true', help='enable xkcd mode') p.add_argument('--style', help='set mpltools style (e.g. ggplot)') p.add_argument('-l', '--level', type=int, help='max level to plot', default=2) p.add_argument('-m', '--metric', action='store_true', help='show metrics') p.add_argument('-a', '--add', help='add extra plot with metrics (comma separated)', default="") p.add_argument('file', help='CSV file to plot (otherwise using stdin). Can be .gz,.xz,.zstd', nargs='?') p.add_argument('--output', '-o', help='Output to file. 
Otherwise show.', nargs='?') args = p.parse_args() adds = set(args.add.split(",")) if args.style: try: from mpltools import style style.use(args.style) except ImportError: print("Need mpltools for setting styles (pip install mpltools)") try: import brewer2mpl all_colors = brewer2mpl.get_map('Paired', 'Qualitative', 12).hex_colors except ImportError: print("Install brewer2mpl for better colors (pip install brewer2mpl)") all_colors = ('green','orange','red','blue', 'black','olive','purple','#6960EC', '#F0FFFF', '#728C00', '#827B60', '#F87217', '#E55451', # 16 '#F88017', '#C11B17', '#17BFC2', '#C48793') # 20 cur_colors = collections.defaultdict(lambda: all_colors) assigned = {} if args.file: inf = tl_io.flex_open_r(args.file) else: inf = sys.stdin rc = csv.reader(inf) timestamps = {} value = {} def isnum(x): return re.match(r'[0-9.]+', x) def skip_event(event, unit): # heuristic to figure out nodes. should enhance CSV to add area is_node = unit and (re.match(r'(% )?Slots( <)?', unit) or "." in event) level = event.count(".") + 1 #print(event, "level", level, "unit", unit) if args.add and event in adds: return False if args.level and is_node and level > args.level: return True if args.metric is False and not is_node: return True return False val = "" for row in rc: r = csv_formats.parse_csv_row(row) if r is None: continue ts, cpu, event, val = r.ts, r.cpu, r.ev, r.val if ts == "SUMMARY" or skip_event(event, r.unit): continue if event not in assigned: level = gen_level.get_level(event) assigned[event] = cur_colors[level][0] cur_colors[level] = cur_colors[level][1:] if len(cur_colors[level]) == 0: cur_colors[level] = all_colors value[event] = [] timestamps[event] = [] timestamps[event].append(float(ts)) try: value[event].append(float(val.replace("%",""))) except ValueError: value[event].append(0.0) k = set(assigned.keys()) - adds levels = set(map(gen_level.get_level, k)) | adds if args.xkcd: try: plt.xkcd() except NameError: print("Please update matplotlib. 
Cannot enable xkcd mode.") n = 1 for l in levels: ax = plt.subplot(len(levels), 1, n) if val.find('%') >= 0: ax.set_ylim(0, 100) t = [] for j in assigned.keys(): print(j, gen_level.get_level(j), l) if gen_level.get_level(j) == l or j == l: t.append(j) if 'style' not in globals(): ax.plot(timestamps[j], value[j], assigned[j]) else: ax.plot(timestamps[j], value[j]) leg = ax.legend(t, loc='upper left') leg.get_frame().set_alpha(0.5) n += 1 plt.xlabel('Time') if val.find('%') >= 0: plt.ylabel('Bottleneck %') else: plt.ylabel("Counter value") if args.output: plt.savefig(args.output) else: plt.show()
4,454
Python
.py
132
29.022727
104
0.635097
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,920
snb_client_ratios.py
andikleen_pmu-tools/snb_client_ratios.py
# -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel 2nd gen Core (code named SandyBridge)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#

# Helpers
# NOTE(review): these globals look like stand-alone defaults that the
# driver rebinds at runtime — confirm against the toplev caller.

def print_error(msg):
    # default error sink: discard the message
    return False

smt_enabled = False
ebs_mode = False
version = "4.8-full-perf"
base_frequency = -1.0
Memory = 0
Average_Frequency = 0.0
num_cores = 1
num_threads = 1
num_sockets = 1

def handle_error(obj, msg):
    # Record a failed node computation: report it, bump the node's error
    # count, and neutralize its value and threshold flag.
    print_error(msg)
    obj.errcount += 1
    obj.thresh = False
    obj.val = 0

def handle_error_metric(obj, msg):
    # Like handle_error, but metrics carry no threshold flag to clear.
    print_error(msg)
    obj.errcount += 1
    obj.val = 0

# Constants
Exe_Ports = 6
Mem_L3_Weight = 7
Mem_STLB_Hit_Cost = 7
BAClear_Cost = 12
MS_Switches_Cost = 3
Pipeline_Width = 4
OneMillion = 1000000
OneBillion = 1000000000
EBS_Mode = 0
DS = 0

# Aux.
formulas def Backend_Bound_Cycles(self, EV, level): return (STALLS_TOTAL(self, EV, level) + EV("UOPS_DISPATCHED.THREAD:c1", level) - Few_Uops_Executed_Threshold(self, EV, level) - Frontend_RS_Empty_Cycles(self, EV, level) + EV("RESOURCE_STALLS.SB", level)) def DurationTimeInSeconds(self, EV, level): return EV("interval-ms", 0) / 1000 def Execute_Cycles(self, EV, level): return (EV("UOPS_DISPATCHED.CORE:c1", level) / 2) if smt_enabled else EV("UOPS_DISPATCHED.CORE:c1", level) def Fetched_Uops(self, EV, level): return (EV("IDQ.DSB_UOPS", level) + EV("LSD.UOPS", level) + EV("IDQ.MITE_UOPS", level) + EV("IDQ.MS_UOPS", level)) def Few_Uops_Executed_Threshold(self, EV, level): EV("UOPS_DISPATCHED.THREAD:c3", level) EV("UOPS_DISPATCHED.THREAD:c2", level) return EV("UOPS_DISPATCHED.THREAD:c3", level) if (IPC(self, EV, level)> 1.8) else EV("UOPS_DISPATCHED.THREAD:c2", level) # Floating Point computational (arithmetic) Operations Count def FLOP_Count(self, EV, level): return (1 *(EV("FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE", level) + EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", level)) + 2 * EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", level) + 4 *(EV("FP_COMP_OPS_EXE.SSE_PACKED_SINGLE", level) + EV("SIMD_FP_256.PACKED_DOUBLE", level)) + 8 * EV("SIMD_FP_256.PACKED_SINGLE", level)) # Floating Point computational (arithmetic) Operations Count def FP_Arith_Scalar(self, EV, level): return EV("FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE", level) + EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", level) # Floating Point computational (arithmetic) Operations Count def FP_Arith_Vector(self, EV, level): return EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", level) + EV("FP_COMP_OPS_EXE.SSE_PACKED_SINGLE", level) + EV("SIMD_FP_256.PACKED_SINGLE", level) + EV("SIMD_FP_256.PACKED_DOUBLE", level) def Frontend_RS_Empty_Cycles(self, EV, level): EV("RS_EVENTS.EMPTY_CYCLES", level) return EV("RS_EVENTS.EMPTY_CYCLES", level) if (self.Fetch_Latency.compute(EV)> 0.1) else 0 def Frontend_Latency_Cycles(self, EV, level): return EV(lambda 
EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", level)) , level ) def HighIPC(self, EV, level): val = IPC(self, EV, level) / Pipeline_Width return val def ITLB_Miss_Cycles(self, EV, level): return (12 * EV("ITLB_MISSES.STLB_HIT", level) + EV("ITLB_MISSES.WALK_DURATION", level)) def Mem_L3_Hit_Fraction(self, EV, level): return EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level) / (EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level) + Mem_L3_Weight * EV("MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS", level)) def Memory_Bound_Fraction(self, EV, level): return (STALLS_MEM_ANY(self, EV, level) + EV("RESOURCE_STALLS.SB", level)) / Backend_Bound_Cycles(self, EV, level) def Mispred_Clears_Fraction(self, EV, level): return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level)) def ORO_DRD_Any_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level ) def ORO_DRD_BW_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c6", level)) , level ) def STALLS_MEM_ANY(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("CYCLE_ACTIVITY.STALLS_L1D_PENDING", level)) , level ) def STALLS_TOTAL(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("CYCLE_ACTIVITY.CYCLES_NO_DISPATCH", level)) , level ) def Recovery_Cycles(self, EV, level): return (EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2) if smt_enabled else EV("INT_MISC.RECOVERY_CYCLES", level) def Retire_Fraction(self, EV, level): return Retired_Slots(self, EV, level) / EV("UOPS_ISSUED.ANY", level) # Retired slots per Logical Processor def Retired_Slots(self, EV, level): return EV("UOPS_RETIRED.RETIRE_SLOTS", level) # Number of logical processors 
(enabled or online) on the target system def Num_CPUs(self, EV, level): return 8 if smt_enabled else 4 # Instructions Per Cycle (per Logical Processor) def IPC(self, EV, level): return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level) # Uops Per Instruction def UopPI(self, EV, level): val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level) self.thresh = (val > 1.05) return val # Cycles Per Instruction (per Logical Processor) def CPI(self, EV, level): return 1 / IPC(self, EV, level) # Per-Logical Processor actual clocks when the Logical Processor is active. def CLKS(self, EV, level): return EV("CPU_CLK_UNHALTED.THREAD", level) # Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward) def SLOTS(self, EV, level): return Pipeline_Width * CORE_CLKS(self, EV, level) # The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage. def Execute_per_Issue(self, EV, level): return EV("UOPS_DISPATCHED.THREAD", level) / EV("UOPS_ISSUED.ANY", level) # Instructions Per Cycle across hyper-threads (per physical core) def CoreIPC(self, EV, level): return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level) # Floating Point Operations Per Cycle def FLOPc(self, EV, level): return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level) # Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor) def ILP(self, EV, level): return EV("UOPS_DISPATCHED.THREAD", level) / Execute_Cycles(self, EV, level) # Core actual clocks when any Logical Processor is active on the Physical Core def CORE_CLKS(self, EV, level): return ((EV("CPU_CLK_UNHALTED.THREAD", level) / 2) * (1 + EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_XCLK", level))) if ebs_mode else(EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2) if smt_enabled else CLKS(self, EV, level) # Total number of retired 
Instructions def Instructions(self, EV, level): return EV("INST_RETIRED.ANY", level) # Average number of Uops retired in cycles where at least one uop has retired. def Retire(self, EV, level): return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level) # Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html def DSB_Coverage(self, EV, level): val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level) self.thresh = (val < 0.7) and HighIPC(self, EV, 1) return val # Average CPU Utilization (percentage) def CPU_Utilization(self, EV, level): return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level) # Average number of utilized CPUs def CPUs_Utilized(self, EV, level): return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0) # Measured Average Core Frequency for unhalted processors [GHz] def Core_Frequency(self, EV, level): return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level) # Giga Floating Point Operations Per Second. 
Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width def GFLOPs(self, EV, level): return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level) # Average Frequency Utilization relative nominal frequency def Turbo_Utilization(self, EV, level): return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level) # Fraction of cycles where both hardware Logical Processors were active def SMT_2T_Utilization(self, EV, level): return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / (EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2) if smt_enabled else 0 # Fraction of cycles spent in the Operating System (OS) Kernel mode def Kernel_Utilization(self, EV, level): val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level) self.thresh = (val > 0.05) return val # Cycles Per Instruction for the Operating System (OS) Kernel mode def Kernel_CPI(self, EV, level): return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level) # Average external Memory Bandwidth Use for reads and writes [GB / sec] def DRAM_BW_Use(self, EV, level): return 64 *(EV("UNC_ARB_TRK_REQUESTS.ALL", level) + EV("UNC_ARB_COH_TRK_REQUESTS.ALL", level)) / OneMillion / Time(self, EV, level) / 1000 # Run duration time in seconds def Time(self, EV, level): val = EV("interval-s", 0) self.thresh = (val < 1) return val # Socket actual clocks when any core is active on that socket def Socket_CLKS(self, EV, level): return EV("UNC_CLOCK.SOCKET", level) # Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate] def IpFarBranch(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level) self.thresh = (val < 1000000) return val # Event groups class Frontend_Bound: name = "Frontend_Bound" domain = "Slots" area = "FE" level = 1 htoff = False sample 
= [] errcount = 0 sibling = None metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO']) maxval = None def compute(self, EV): try: self.val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Frontend_Bound zero division") return self.val desc = """ This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.""" class Fetch_Latency: name = "Fetch_Latency" domain = "Slots" area = "FE" level = 2 htoff = False sample = ['RS_EVENTS.EMPTY_END'] errcount = 0 sibling = None metricgroup = frozenset(['Frontend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = Pipeline_Width * Frontend_Latency_Cycles(self, EV, 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Fetch_Latency zero division") return self.val desc = """ This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction- cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. 
In such cases; the Frontend eventually delivers no uops for some period.""" class ITLB_Misses: name = "ITLB_Misses" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['ITLB_MISSES.WALK_COMPLETED'] errcount = 0 sibling = None metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB']) maxval = None def compute(self, EV): try: self.val = ITLB_Miss_Cycles(self, EV, 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "ITLB_Misses zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.. Consider large 2M pages for code (selectively prefer hot large-size function, due to limited 2M entries). Linux options: standard binaries use libhugetlbfs; Hfsort.. https://github. com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public ations/optimizing-function-placement-for-large-scale-data- center-applications-2/""" class Branch_Resteers: name = "Branch_Resteers" domain = "Clocks" area = "FE" level = 3 htoff = False sample = ['BR_MISP_RETIRED.ALL_BRANCHES'] errcount = 0 sibling = None metricgroup = frozenset(['FetchLat']) maxval = None def compute(self, EV): try: self.val = BAClear_Cost *(EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) + EV("MACHINE_CLEARS.COUNT", 3) + EV("BACLEARS.ANY", 3)) / CLKS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Branch_Resteers zero division") return self.val desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. 
Note the value of this node may overlap with its siblings."""

# Level-3 FE (fetch-latency) node: cycles lost to uop-delivery switches into
# the Microcode Sequencer, costed at MS_Switches_Cost (2) clocks per switch.
class MS_Switches:
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Fixed penalty per MS switch, normalized to thread clocks.
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            # Only flagged when the parent node is also above threshold.
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU
was stalled due to switches of uop delivery to the Microcode
Sequencer (MS). Commonly used instructions are optimized for
delivery by the DSB (decoded i-cache) or MITE (legacy
instruction decode) pipelines. Certain operations cannot be
handled natively by the execution pipeline; and must be
performed by microcode (small programs injected into the
execution stream). Switching to the MS too often can
negatively impact performance. The MS is designated to
deliver long uop flows required by CISC instructions like
CPUID; or uncommon conditions like Floating Point Assists
when dealing with Denormals."""

# Level-3 FE (fetch-latency) node: stall cycles caused by Length Changing
# Prefixes (ILD_STALL.LCP), relative to thread clocks.
class LCP:
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ILD_STALL.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled
due to Length Changing Prefixes (LCPs). Using proper
compiler flags or Intel Compiler by default will certainly
avoid this."""

# Level-3 FE (fetch-latency) node: penalty cycles of DSB-to-MITE pipeline
# switches.
class DSB_Switches:
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to switches from DSB to MITE pipelines. The DSB
(decoded i-cache) is a Uop Cache where the front-end
directly delivers Uops (micro operations) avoiding heavy x86
decoding. The DSB pipeline has shorter latency and delivered
higher bandwidth than the MITE (legacy instruction decode
pipeline). Switching between the two pipelines can cause
penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""

# Level-2 FE node: frontend-bandwidth slots, derived as the remainder of
# Frontend_Bound after subtracting Fetch_Latency.
class Fetch_Bandwidth:
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend bandwidth issues. For example;
inefficiencies at the instruction decoders; or restrictions
for caching in the DSB (decoded uops cache) are categorized
under Fetch Bandwidth. In such cases; the Frontend typically
delivers suboptimal amount of uops to the Backend."""

# Level-1 TMA category: slots wasted on wrong-path uops plus
# recovery-bubble slots (Pipeline_Width * recovery cycles).
class Bad_Speculation:
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""

# Level-2 BAD node: the mispredict share of Bad_Speculation, scaled by the
# mispredict fraction of all clears.
class Branch_Mispredicts:
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction. These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""

# Level-2 BAD node: remainder of Bad_Speculation not attributed to branch
# mispredicts, i.e. machine clears (nukes).
class Machine_Clears:
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears. These slots are either wasted by uops
fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:. https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""

# Level-1 TMA category: the residual 1 - (Frontend + Bad_Speculation +
# Retiring) -- slots stalled for lack of Backend resources.
class Backend_Bound:
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 1 -(self.Frontend_Bound.compute(EV) + self.Bad_Speculation.compute(EV) + self.Retiring.compute(EV))
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. 
Backend is the portion
of the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""

# Level-2 BE node: memory-subsystem share of Backend_Bound, scaled by the
# memory-stall fraction helper.
class Memory_Bound:
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck. Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""

# Level-4 memory node: estimated DTLB-miss cycles for loads -- STLB hits
# costed at Mem_STLB_Hit_Cost (8) clocks plus page-walk duration.
class DTLB_Load:
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT", 4) + EV("DTLB_LOAD_MISSES.WALK_DURATION", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages). This includes hitting in the second-level TLB (STLB)
as well as performing a hardware page walk on an STLB
miss.."""

# Level-3 memory node: L2-pending stall cycles apportioned to L3 hits via
# the L3-hit fraction helper.
class L3_Bound:
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.LLC_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mem_L3_Hit_Fraction(self, EV, 3) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
loads accesses to L3 cache or contended with a sibling Core.
Avoiding cache misses (i.e. L2 misses/L3 hits) can improve
the latency and increase performance."""

# Level-3 memory node: the complementary (L3-miss) share of L2-pending
# stall cycles -- loads served by DRAM.
class DRAM_Bound:
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (1 - Mem_L3_Hit_Fraction(self, EV, 3)) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled on
accesses to external memory (DRAM) by loads. Better caching
can improve the latency and increase performance."""

# Level-4 DRAM node: cycles with high outstanding data-read occupancy
# (bandwidth-limited), via the ORO_DRD_BW_Cycles helper.
class MEM_Bandwidth:
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the core's
performance was likely hurt due to approaching bandwidth
limits of external memory - DRAM ([SPR-HBM] and/or HBM).
The underlying heuristic assumes that a similar off-core
traffic is generated by all IA cores. This metric does not
aggregate non-data-read requests by this logical processor;
requests from other IA Logical Processors/Physical
Cores/sockets; or other non-IA devices like GPU; hence the
maximum external memory bandwidth limits may or may not be
approached when this metric is flagged (see Uncore counters
for that).. Improve data accesses to reduce cacheline
transfers from/to memory. Examples: 1) Consume all bytes of
a each cacheline before it is evicted (e.g. reorder
structure elements and split non-hot ones), 2) merge
computed-limited with BW-limited loops, 3) NUMA
optimizations in multi-socket system. 
Note: software
prefetches will not help BW-limited application.."""

# Level-4 DRAM node: any-outstanding-read cycles minus the bandwidth
# component -- i.e. the latency-limited remainder.
class MEM_Latency:
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the
performance was likely hurt due to latency from external
memory - DRAM ([SPR-HBM] and/or HBM). This metric does not
aggregate requests from other Logical Processors/Physical
Cores/sockets (see Uncore counters for that).. Improve data
accesses or interleave them with compute. Examples: 1) Data
layout re-structuring, 2) Software Prefetches (also through
the compiler).."""

# Level-3 memory node: cycles the store buffer was full
# (RESOURCE_STALLS.SB) relative to thread clocks.
class Store_Bound:
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_UOPS_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("RESOURCE_STALLS.SB", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to RFO
store memory accesses; RFO store issue a read-for-ownership
request before the write. Even though store accesses do not
typically stall out-of-order CPUs; there are few cases where
stores can lead to actual stalls. This metric will be
flagged should RFO stores be a bottleneck."""

# Level-2 BE node: non-memory backend slots, derived as Backend_Bound
# minus Memory_Bound.
class Core_Bound:
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots where Core non-
memory issues were of a bottleneck. Shortage in hardware
compute resources; or dependencies in software's
instructions are both categorized under Core Bound. Hence it
may indicate the machine ran out of an out-of-order
resource; certain execution units are overloaded or
dependencies in program's data- or instruction-flow are
limiting the performance (e.g. FP-chained long-latency
arithmetic operations).. Tip: consider Port Saturation
analysis as next step."""

# Level-3 core node: cycles the FP divider was active, relative to
# core (not thread) clocks.
class Divider:
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['ARITH.FPU_DIV_ACTIVE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("ARITH.FPU_DIV_ACTIVE", 3) / CORE_CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the Divider
unit was active. Divide and square root instructions are
performed by the Divider unit and can take considerably
longer latency than integer or Floating Point addition;
subtraction; or multiplication."""

# Level-3 core node: backend-bound cycles not explained by store-buffer or
# memory stalls -- i.e. execution-port underutilization.
class Ports_Utilization:
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (Backend_Bound_Cycles(self, EV, 3) - EV("RESOURCE_STALLS.SB", 3) - STALLS_MEM_ANY(self, EV, 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU performance
was potentially limited due to Core computation issues (non
divider-related). Two distinct categories can be attributed
into this metric: (1) heavy data-dependency among contiguous
instructions would manifest in this metric - such cases are
often referred to as low Instruction Level Parallelism
(ILP). (2) Contention on some hardware execution unit other
than Divider. For example; when there are too many multiply
operations.. Loop Vectorization -most compilers feature
auto-Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""

# Level-1 TMA category: retired slots over total slots.  Note thresh also
# fires when the Heavy_Operations child is flagged.
class Retiring:
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.RETIRE_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """
This category represents fraction of slots utilized by
useful work i.e. issued uops that eventually get retired.
Ideally; all pipeline slots would be attributed to the
Retiring category. Retiring of 100% would indicate the
maximum Pipeline_Width throughput was achieved. Maximizing
Retiring typically increases the Instructions-per-cycle (see
IPC metric). Note that a high Retiring value does not
necessary mean there is no room for more performance. For
example; Heavy-operations or Microcode Assists are
categorized under Retiring. They often indicate suboptimal
performance and can often be optimized or avoided. . A high
Retiring value for non-vectorized code may be a good hint
for programmer to consider vectorizing his code. Doing so
essentially lets more computations be done without
significantly increasing number of instructions thus
improving the performance."""

# Level-2 RET node: Retiring minus Heavy_Operations -- single-uop
# ("light") instruction slots.
class Light_Operations:
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring light-weight operations -- instructions that
require no more than one uop (micro-operation). This
correlates with total number of instructions used by the
program. A uops-per-instruction (see UopPI metric) ratio of
1 or less should be expected for decently optimized code
running on Intel Core/Xeon products. While this often
indicates efficient X86 instructions were executed; high
value does not necessarily mean better performance cannot be
achieved. . 
Focus on techniques that reduce instruction count
or result in more efficient instructions generation such as
vectorization."""

# Level-3 RET node: overall FP-arithmetic uops fraction -- sum of the X87,
# scalar and vector children (Uops count-domain; may exceed parent).
class FP_Arith:
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """
This metric represents overall arithmetic floating-point
(FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use of
\"Uops\" CountDomain and FMA double-counting."""

# Level-4 FP node: legacy x87 uops share of dispatched uops, scaled by
# retired slots.
class X87_Use:
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Retired_Slots(self, EV, 4) * EV("FP_COMP_OPS_EXE.X87", 4) / EV("UOPS_DISPATCHED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """
This metric serves as an approximation of legacy x87 usage.
It accounts for instructions beyond X87 FP arithmetic
operations; hence may be used as a thermometer to avoid X87
high usage and preferably upgrade to modern ISA. See Tip
under Tuning Hint.. Tip: consider compiler flags to generate
newer AVX (or SSE) instruction sets; which typically perform
better and feature vectors."""

# Level-4 FP node: scalar FP-arithmetic uops share of dispatched uops.
class FP_Scalar:
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / EV("UOPS_DISPATCHED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
scalar uops fraction the CPU has retired. May overcount due
to FMA double counting.. Investigate what limits (compiler)
generation of vector code."""

# Level-4 FP node: vector FP-arithmetic uops share across all widths.
class FP_Vector:
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = FP_Arith_Vector(self, EV, 4) / EV("UOPS_DISPATCHED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
vector uops fraction the CPU has retired aggregated across
all vector widths. May overcount due to FMA double
counting.. Check if vector width is expected"""

# Level-5 FP node: 128-bit-wide vector FP uops share of dispatched uops.
class FP_Vector_128b:
    name = "FP_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", 5) + EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", 5)) / EV("UOPS_DISPATCHED.THREAD", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_128b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 128-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""

# Level-5 FP node: 256-bit-wide vector FP uops share of dispatched uops.
class FP_Vector_256b:
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("SIMD_FP_256.PACKED_DOUBLE", 5) + EV("SIMD_FP_256.PACKED_SINGLE", 5)) / EV("UOPS_DISPATCHED.THREAD", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""

# Level-2 RET node: heavy (multi-uop / microcoded) slots; on this model it
# equals the Microcode_Sequencer child.
class Heavy_Operations:
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Microcode_Sequencer.compute(EV)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that
require two or more uops or micro-coded sequences. This
highly-correlates with the uop length of these
instructions/sequences."""

# Level-3 RET node: slots retiring MS-delivered uops, scaled by the
# retired-of-issued fraction.
class Microcode_Sequencer:
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = ['IDQ.MS_UOPS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was
retiring uops fetched by the Microcode Sequencer (MS) unit.
The MS is used for CISC instructions not supported by the
default decoders (like repeat move strings; or CPUID); or by
microcode assists used to address some operation modes (like
in Floating Point assists). 
These cases can often be
avoided.."""

# ---- Informational (non-tree) metrics.  These use handle_error_metric()
# (no return value) and most have thresh always True so they are shown.

# Info.Thread: Instructions Per Cycle.
class Metric_IPC:
    name = "IPC"
    domain = "Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Ret', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IPC zero division")
    desc = """
Instructions Per Cycle (per Logical Processor)"""

# Info.Thread: uops per instruction; flagged above 1.05.
class Metric_UopPI:
    name = "UopPI"
    domain = "Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UopPI(self, EV, 0)
            self.thresh = (self.val > 1.05)
        except ZeroDivisionError:
            handle_error_metric(self, "UopPI zero division")
    desc = """
Uops Per Instruction"""

# Info.Thread: Cycles Per Instruction (inverse of IPC).
class Metric_CPI:
    name = "CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPI zero division")
    desc = """
Cycles Per Instruction (per Logical Processor)"""

# Info.Thread: raw thread clock count.
class Metric_CLKS:
    name = "CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CLKS zero division")
    desc = """
Per-Logical Processor actual clocks when the Logical
Processor is active."""

# Info.Thread: total issue-pipeline slots.
class Metric_SLOTS:
    name = "SLOTS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SLOTS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SLOTS zero division")
    desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""

# Info.Thread: executed-to-issued uops ratio.
class Metric_Execute_per_Issue:
    name = "Execute_per_Issue"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Cor', 'Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute_per_Issue(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute_per_Issue zero division")
    desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests
high rate of uop micro-fusions. Ratio < 1 suggest high rate
of \"execute\" at rename stage."""

# Info.Core: IPC aggregated over both hyper-threads of a physical core.
class Metric_CoreIPC:
    name = "CoreIPC"
    domain = "Core_Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CoreIPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CoreIPC zero division")
    desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""

# Info.Core: FP operations per core clock.
class Metric_FLOPc:
    name = "FLOPc"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'Flops'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FLOPc(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FLOPc zero division")
    desc = """
Floating Point Operations Per Cycle"""

# Info.Core: average executed uops per cycle when executing (bounded by
# the Exe_Ports constant).
class Metric_ILP:
    name = "ILP"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = ILP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "ILP zero division")
    desc = """
Instruction-Level-Parallelism (average number of uops
executed when there is execution) per thread (logical-
processor)"""

# Info.Core: core clocks with any logical processor active.
class Metric_CORE_CLKS:
    name = "CORE_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CORE_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CORE_CLKS zero division")
    desc = """
Core actual clocks when any Logical Processor is active on
the Physical Core"""

# Info.Inst_Mix: total retired instructions.
class Metric_Instructions:
    name = "Instructions"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Summary', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instructions(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Instructions zero division")
    desc = """
Total number of retired Instructions"""

# Info.Pipeline: average uops retired per retirement cycle.
class Metric_Retire:
    name = "Retire"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Pipeline', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Retire(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Retire zero division")
    desc = """
Average number of Uops retired in cycles where at least one
uop has retired."""

# Info.Frontend: DSB (uop-cache) delivery fraction; flagged when low
# while IPC is high.
class Metric_DSB_Coverage:
    name = "DSB_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSB', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Coverage(self, EV, 0)
            self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Coverage zero division")
    desc = """
Fraction of Uops delivered by the DSB (aka Decoded ICache;
or Uop Cache). See section 'Decoded ICache' in Optimization
Manual. http://www.intel.com/content/www/us/en/architecture-
and-technology/64-ia-32-architectures-optimization-
manual.html"""

# Info.System: average CPU utilization fraction.
class Metric_CPU_Utilization:
    name = "CPU_Utilization"
    domain = "Metric"
    maxval = 1
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['HPC', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPU_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPU_Utilization zero division")
    desc = """
Average CPU Utilization (percentage)"""

# Info.System: average number of utilized CPUs.
class Metric_CPUs_Utilized:
    name = "CPUs_Utilized"
    domain = "Metric"
    maxval = 300
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPUs_Utilized(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPUs_Utilized zero division")
    desc = """
Average number of utilized CPUs"""

# Info.System: measured average unhalted core frequency.
class Metric_Core_Frequency:
    name = "Core_Frequency"
    domain = "SystemMetric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary', 'Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Core_Frequency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Frequency zero division")
    desc = """
Measured Average Core Frequency for unhalted processors
[GHz]"""

# Info.System: giga-FLOP throughput.
class Metric_GFLOPs:
    name = "GFLOPs"
    domain = "Metric"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = GFLOPs(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "GFLOPs zero division")
    desc = """
Giga Floating Point Operations Per Second. 
Aggregate across
all supported options of: FP precisions, scalar and vector
instructions, vector-width"""

# Info.System: average frequency relative to nominal (turbo ratio).
class Metric_Turbo_Utilization:
    name = "Turbo_Utilization"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Turbo_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Turbo_Utilization zero division")
    desc = """
Average Frequency Utilization relative nominal frequency"""

# Info.System: fraction of cycles with both SMT siblings active.
class Metric_SMT_2T_Utilization:
    name = "SMT_2T_Utilization"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SMT_2T_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SMT_2T_Utilization zero division")
    desc = """
Fraction of cycles where both hardware Logical Processors
were active"""

# Info.System: kernel-mode cycle fraction; flagged above 5%.
class Metric_Kernel_Utilization:
    name = "Kernel_Utilization"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['OS'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Kernel_Utilization(self, EV, 0)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_Utilization zero division")
    desc = """
Fraction of cycles spent in the Operating System (OS) Kernel
mode"""

# Info.System: CPI restricted to kernel mode.
class Metric_Kernel_CPI:
    name = "Kernel_CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['OS'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Kernel_CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_CPI zero division")
    desc = """
Cycles Per Instruction for the Operating System (OS) Kernel
mode"""

# Info.System: external memory read+write bandwidth in GB/s.
class Metric_DRAM_BW_Use:
    name = "DRAM_BW_Use"
    domain = "GB/sec"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DRAM_BW_Use(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "DRAM_BW_Use zero division")
    desc = """
Average external Memory Bandwidth Use for reads and writes
[GB / sec]"""

# Info.System: run duration; flagged when below one second (too short for
# reliable sampling).
class Metric_Time:
    name = "Time"
    domain = "Seconds"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Time(self, EV, 0)
            self.thresh = (self.val < 1)
        except ZeroDivisionError:
            handle_error_metric(self, "Time zero division")
    desc = """
Run duration time in seconds"""

# Info.System: per-socket clock count.
class Metric_Socket_CLKS:
    name = "Socket_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Socket_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Socket_CLKS zero division")
    desc = """
Socket actual clocks when any core is active on that
socket"""

# Info.System: instructions per far branch; flagged below one million.
class Metric_IpFarBranch:
    name = "IpFarBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Branches', 'OS'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpFarBranch(self, EV, 0)
            self.thresh = (self.val < 1000000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFarBranch zero division")
    desc = """
Instructions per Far Branch ( Far Branches apply upon
transition from application to operating system, handling
interrupts, exceptions) [lower number means higher occurrence
rate]"""

# Schedule
# Setup instantiates every node/metric, registers it with the runner `r`,
# then wires the parent links that the nodes' thresh computations rely on.
class Setup:
    def __init__(self, r):
        o = dict()
        n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n
        n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n
        n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n
        n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n
        n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n
        n = LCP() ; r.run(n) ; o["LCP"] = n
        n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n
        n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n
        n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n
        n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n
        n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n
        n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n
        n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n
        n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n
        n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n
        n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n
        n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n
        n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n
        n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n
        n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n
        n = Divider() ; r.run(n) ; o["Divider"] = n
        n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n
        n = Retiring() ; r.run(n) ; o["Retiring"] = n
        n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n
        n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n
        n = X87_Use() ; r.run(n) ; o["X87_Use"] = n
        n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n
        n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n
        n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n
        n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n
        n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n
        n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n

        # parents

        o["Fetch_Latency"].parent = o["Frontend_Bound"]
        o["ITLB_Misses"].parent = o["Fetch_Latency"]
        o["Branch_Resteers"].parent = o["Fetch_Latency"]
        o["MS_Switches"].parent = o["Fetch_Latency"]
        o["LCP"].parent = o["Fetch_Latency"]
        o["DSB_Switches"].parent = o["Fetch_Latency"]
        o["Fetch_Bandwidth"].parent = o["Frontend_Bound"]
        o["Branch_Mispredicts"].parent = o["Bad_Speculation"]
        o["Machine_Clears"].parent = o["Bad_Speculation"]
        o["Memory_Bound"].parent = o["Backend_Bound"]
        o["DTLB_Load"].parent = o["Memory_Bound"]
        o["L3_Bound"].parent = o["Memory_Bound"]
        o["DRAM_Bound"].parent = o["Memory_Bound"]
        o["MEM_Bandwidth"].parent = o["DRAM_Bound"]
        o["MEM_Latency"].parent = o["DRAM_Bound"]
        o["Store_Bound"].parent = o["Memory_Bound"]
        o["Core_Bound"].parent = o["Backend_Bound"]
        o["Divider"].parent = o["Core_Bound"]
        o["Ports_Utilization"].parent = o["Core_Bound"]
        o["Light_Operations"].parent = o["Retiring"]
        o["FP_Arith"].parent = o["Light_Operations"]
        o["X87_Use"].parent = o["FP_Arith"]
        o["FP_Scalar"].parent = o["FP_Arith"]
        o["FP_Vector"].parent = o["FP_Arith"]
        o["FP_Vector_128b"].parent = o["FP_Vector"]
        o["FP_Vector_256b"].parent = o["FP_Vector"]
        o["Heavy_Operations"].parent = o["Retiring"]
        o["Microcode_Sequencer"].parent = o["Heavy_Operations"]

        # user visible metrics

        n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n
        n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n
        n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n
        n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n
        n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n
        n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n
        n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n
        n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n
        n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n
        n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n
        n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n
        n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n
        n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n
        n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n
        n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n
        n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n
        n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n
        n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n
        n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n
        n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n
        n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n
        n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n
        n = Metric_Time() ; r.metric(n) ; o["Time"] = n
        n = 
Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n # references between groups o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Backend_Bound"].Retiring = o["Retiring"] o["Backend_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Retiring = o["Retiring"] o["Memory_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Backend_Bound = o["Backend_Bound"] o["Memory_Bound"].Fetch_Latency = o["Fetch_Latency"] o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Core_Bound"].Retiring = o["Retiring"] o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Core_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Core_Bound"].Fetch_Latency = o["Fetch_Latency"] o["Ports_Utilization"].Fetch_Latency = o["Fetch_Latency"] o["Retiring"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Retiring = o["Retiring"] o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] o["FP_Arith"].FP_Scalar = o["FP_Scalar"] o["FP_Arith"].X87_Use = o["X87_Use"] o["FP_Arith"].FP_Vector = o["FP_Vector"] o["Heavy_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] # siblings cross-tree o["MS_Switches"].sibling = (o["Machine_Clears"], o["Microcode_Sequencer"],) o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],) o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],) 
o["Machine_Clears"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],) o["FP_Scalar"].sibling = (o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["FP_Vector"].sibling = (o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"],) o["FP_Vector_128b"].sibling = (o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"],) o["FP_Vector_256b"].sibling = (o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"],) o["Microcode_Sequencer"].sibling = (o["MS_Switches"], o["Machine_Clears"],) o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DRAM_BW_Use"].sibling = (o["MEM_Bandwidth"],)
64,349
Python
.py
1,608
34.54291
306
0.666219
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,921
ehl_ratios.py
andikleen_pmu-tools/ehl_ratios.py
# -*- coding: latin-1 -*- # # auto generated TopDown/TMA 2.0 description for Intel Elkhart Lake # Please see http://ark.intel.com for more details on these CPUs. # # References: # http://bit.ly/tma-ispass14 # http://halobates.de/blog/p/262 # https://sites.google.com/site/analysismethods/yasin-pubs # https://download.01.org/perfmon/ # https://github.com/andikleen/pmu-tools/wiki/toplev-manual # # Helpers print_error = lambda msg: False version = "2.0" base_frequency = -1.0 Memory = 0 Average_Frequency = 0.0 use_aux = False def handle_error(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 obj.thresh = False def handle_error_metric(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 # Constants # Aux. formulas # pipeline allocation width def Pipeline_Width(self, EV, level): return 4 def CLKS(self, EV, level): return EV("CPU_CLK_UNHALTED.CORE", level) def CLKS_P(self, EV, level): return EV("CPU_CLK_UNHALTED.CORE_P", level) def SLOTS(self, EV, level): return Pipeline_Width(self, EV, level) * CLKS(self, EV, level) # Instructions Per Cycle def IPC(self, EV, level): return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level) # Cycles Per Instruction def CPI(self, EV, level): return CLKS(self, EV, level) / EV("INST_RETIRED.ANY", level) # Uops Per Instruction def UPI(self, EV, level): return EV("UOPS_RETIRED.ALL", level) / EV("INST_RETIRED.ANY", level) # Percentage of total non-speculative loads with a store forward or unknown store address block def Store_Fwd_Blocks(self, EV, level): return 100 * EV("LD_BLOCKS.DATA_UNKNOWN", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Percentage of total non-speculative loads with a address aliasing block def Address_Alias_Blocks(self, EV, level): return 100 * EV("LD_BLOCKS.4K_ALIAS", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Percentage of total non-speculative loads that are splits def Load_Splits(self, EV, level): return 100 * EV("MEM_UOPS_RETIRED.SPLIT_LOADS", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", 
level) # Instructions per Branch (lower number means higher occurance rate) def IpBranch(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level) # Instruction per (near) call (lower number means higher occurance rate) def IpCall(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.CALL", level) # Instructions per Load def IpLoad(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level) # Instructions per Store def IpStore(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level) # Number of Instructions per non-speculative Branch Misprediction def IpMispredict(self, EV, level): return EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) # Instructions per Far Branch def IpFarBranch(self, EV, level): return EV("INST_RETIRED.ANY", level) / (EV("BR_INST_RETIRED.FAR_BRANCH", level) / 2 ) # Ratio of all branches which mispredict def Branch_Mispredict_Ratio(self, EV, level): return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level) # Ratio between Mispredicted branches and unknown branches def Branch_Mispredict_to_Unknown_Branch_Ratio(self, EV, level): return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / EV("BACLEARS.ANY", level) # Percentage of all uops which are ucode ops def Microcode_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.MS", level) / EV("UOPS_RETIRED.ALL", level) # Percentage of all uops which are FPDiv uops def FPDiv_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.FPDIV", level) / EV("UOPS_RETIRED.ALL", level) # Percentage of all uops which are IDiv uops def IDiv_Uop_Ratio(self, EV, level): return 100 * EV("UOPS_RETIRED.IDIV", level) / EV("UOPS_RETIRED.ALL", level) # Average Frequency Utilization relative nominal frequency def Turbo_Utilization(self, EV, level): return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level) # 
Fraction of cycles spent in Kernel mode def Kernel_Utilization(self, EV, level): return EV("CPU_CLK_UNHALTED.CORE_P:sup", level) / EV("CPU_CLK_UNHALTED.CORE_P", level) # Average CPU Utilization def CPU_Utilization(self, EV, level): return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0) # Cycle cost per L2 hit def Cycles_per_Demand_Load_L2_Hit(self, EV, level): return EV("MEM_BOUND_STALLS.LOAD_L2_HIT", level) / EV("MEM_LOAD_UOPS_RETIRED.L2_HIT", level) # Cycle cost per LLC hit def Cycles_per_Demand_Load_L3_Hit(self, EV, level): return EV("MEM_BOUND_STALLS.LOAD_LLC_HIT", level) / EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) # Cycle cost per DRAM hit def Cycles_per_Demand_Load_DRAM_Hit(self, EV, level): return EV("MEM_BOUND_STALLS.LOAD_DRAM_HIT", level) / EV("MEM_LOAD_UOPS_RETIRED.DRAM_HIT", level) # load ops retired per 1000 instruction def MemLoadPKI(self, EV, level): return 1000 * EV("MEM_UOPS_RETIRED.ALL_LOADS", level) / EV("INST_RETIRED.ANY", level) # Event groups class Frontend_Bound: name = "Frontend_Bound" domain = "Slots" area = "FE" level = 1 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_FE_BOUND.ALL", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Frontend_Bound zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to frontend stalls.""" class Bad_Speculation: name = "Bad_Speculation" domain = "Slots" area = "BAD" level = 1 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = (EV("TOPDOWN_BAD_SPECULATION.MISPREDICT", 1) + EV("TOPDOWN_BAD_SPECULATION.MONUKE", 1)) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.15) except ZeroDivisionError: handle_error(self, "Bad_Speculation zero division") return self.val desc = """ Counts the total number of issue 
slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.""" class Branch_Mispredicts: name = "Branch_Mispredicts" domain = "Slots" area = "BAD" level = 2 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.MISPREDICT", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Branch_Mispredicts zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to branch mispredicts.""" class Machine_Clears: name = "Machine_Clears" domain = "Slots" area = "BAD" level = 2 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = (EV("TOPDOWN_BAD_SPECULATION.MONUKE", 2)) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Machine_Clears zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.""" class Fast_Nuke: name = "Fast_Nuke" domain = "Slots" area = "BAD" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_BAD_SPECULATION.MONUKE", 3) / SLOTS(self, EV, 3) 
self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "Fast_Nuke zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to a machine clear classified as a fast nuke due to memory ordering, memory disambiguation and memory renaming.""" class Backend_Bound: name = "Backend_Bound" domain = "Slots" area = "BE" level = 1 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.ALL", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Backend_Bound zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound. 
The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.""" class Load_Store_Bound: name = "Load_Store_Bound" domain = "Cycles" area = "BE" level = 2 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = (EV("MEM_BOUND_STALLS.LOAD_L2_HIT", 2) + EV("MEM_BOUND_STALLS.LOAD_LLC_HIT", 2) + EV("MEM_BOUND_STALLS.LOAD_DRAM_HIT", 2)) / CLKS(self, EV, 2) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Load_Store_Bound zero division") return self.val desc = """ Counts the number of cycles the core is stalled due to stores or loads.""" class L2_Bound: name = "L2_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("MEM_BOUND_STALLS.LOAD_L2_HIT", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "L2_Bound zero division") return self.val desc = """ Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.""" class L3_Bound: name = "L3_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("MEM_BOUND_STALLS.LOAD_LLC_HIT", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "L3_Bound zero division") return self.val desc = """ Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.""" class DRAM_Bound: name = "DRAM_Bound" domain = "Cycles" area = "BE" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = 
EV("MEM_BOUND_STALLS.LOAD_DRAM_HIT", 3) / CLKS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "DRAM_Bound zero division") return self.val desc = """ Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).""" class Backend_Bound_Aux: name = "Backend_Bound_Aux" domain = "Slots" area = "BE_aux" level = 1 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Backend_Bound_Aux zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that UOPS must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. All of these subevents count backend stalls, in slots, due to a resource limitation. These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based. These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.""" class Resource_Bound: name = "Resource_Bound" domain = "Slots" area = "BE_aux" level = 2 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = self.Backend_Bound.compute(EV) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "Resource_Bound zero division") return self.val desc = """ Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. 
If a uop is not available (IQ is empty), this event will not count.""" class Mem_Scheduler: name = "Mem_Scheduler" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.MEM_SCHEDULER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Mem_Scheduler zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.""" class Non_Mem_Scheduler: name = "Non_Mem_Scheduler" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Non_Mem_Scheduler zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.""" class Register: name = "Register" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.REGISTER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Register zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).""" class Reorder_Buffer: name = "Reorder_Buffer" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 
sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.REORDER_BUFFER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Reorder_Buffer zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls).""" class Store_Buffer: name = "Store_Buffer" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.STORE_BUFFER", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Store_Buffer zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to store buffers stalls.""" class Alloc_Restriction: name = "Alloc_Restriction" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Alloc_Restriction zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions.""" class Serialization: name = "Serialization" domain = "Slots" area = "BE_aux" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_BE_BOUND.SERIALIZATION", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.10) except ZeroDivisionError: handle_error(self, "Serialization zero division") return self.val desc = """ Counts the number of issue slots that were not consumed by the backend due to scoreboards 
from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).""" class Retiring: name = "Retiring" domain = "Slots" area = "RET" level = 1 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("TOPDOWN_RETIRING.ALL", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.75) except ZeroDivisionError: handle_error(self, "Retiring zero division") return self.val desc = """ Counts the numer of issue slots that result in retirement slots.""" class Base: name = "Base" domain = "Slots" area = "RET" level = 2 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = (EV("TOPDOWN_RETIRING.ALL", 2) - EV("UOPS_RETIRED.MS", 2)) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.60) except ZeroDivisionError: handle_error(self, "Base zero division") return self.val desc = """ Counts the number of uops that are not from the microsequencer.""" class FP_uops: name = "FP_uops" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("UOPS_RETIRED.FPDIV", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.20) except ZeroDivisionError: handle_error(self, "FP_uops zero division") return self.val desc = """ Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt).""" class Other_Ret: name = "Other_Ret" domain = "Slots" area = "RET" level = 3 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = (EV("TOPDOWN_RETIRING.ALL", 3) - EV("UOPS_RETIRED.MS", 3) - EV("UOPS_RETIRED.FPDIV", 3)) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.30) except ZeroDivisionError: handle_error(self, "Other_Ret zero division") return self.val desc = """ Counts the number of uops retired 
excluding ms and fp div uops.""" class MS_uops: name = "MS_uops" domain = "Slots" area = "RET" level = 2 htoff = False sample = [] errcount = 0 sibling = None server = False metricgroup = frozenset([]) def compute(self, EV): try: self.val = EV("UOPS_RETIRED.MS", 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error(self, "MS_uops zero division") return self.val desc = """ Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.""" class Metric_CLKS: name = "CLKS" domain = "Cycles" maxval = 0 server = False errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS zero division") desc = """ """ class Metric_CLKS_P: name = "CLKS_P" domain = "Cycles" maxval = 0 server = False errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CLKS_P(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS_P zero division") desc = """ """ class Metric_SLOTS: name = "SLOTS" domain = "Cycles" maxval = 0 server = False errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = SLOTS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SLOTS zero division") desc = """ """ class Metric_IPC: name = "IPC" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IPC zero division") desc = """ Instructions Per Cycle""" class Metric_CPI: name = "CPI" domain = "" maxval = 0 server = False errcount = 0 area = 
"Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPI zero division") desc = """ Cycles Per Instruction""" class Metric_UPI: name = "UPI" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Core" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = UPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "UPI zero division") desc = """ Uops Per Instruction""" class Metric_Store_Fwd_Blocks: name = "Store_Fwd_Blocks" domain = "" maxval = 0 server = False errcount = 0 area = "Info.L1_Bound" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Store_Fwd_Blocks(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Store_Fwd_Blocks zero division") desc = """ Percentage of total non-speculative loads with a store forward or unknown store address block""" class Metric_Address_Alias_Blocks: name = "Address_Alias_Blocks" domain = "" maxval = 0 server = False errcount = 0 area = "Info.L1_Bound" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Address_Alias_Blocks(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Address_Alias_Blocks zero division") desc = """ Percentage of total non-speculative loads with a address aliasing block""" class Metric_Load_Splits: name = "Load_Splits" domain = "" maxval = 0 server = False errcount = 0 area = "Info.L1_Bound" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Load_Splits(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Splits zero division") desc = """ Percentage of total non-speculative loads that are splits""" class Metric_IpBranch: name = "IpBranch" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Inst_Mix" metricgroup 
= frozenset([]) sibling = None def compute(self, EV): try: self.val = IpBranch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpBranch zero division") desc = """ Instructions per Branch (lower number means higher occurance rate)""" class Metric_IpCall: name = "IpCall" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpCall(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpCall zero division") desc = """ Instruction per (near) call (lower number means higher occurance rate)""" class Metric_IpLoad: name = "IpLoad" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpLoad(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpLoad zero division") desc = """ Instructions per Load""" class Metric_IpStore: name = "IpStore" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpStore(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpStore zero division") desc = """ Instructions per Store""" class Metric_IpMispredict: name = "IpMispredict" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Number of Instructions per non-speculative Branch Misprediction""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) 
self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch""" class Metric_Branch_Mispredict_Ratio: name = "Branch_Mispredict_Ratio" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Branch_Mispredict_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Mispredict_Ratio zero division") desc = """ Ratio of all branches which mispredict""" class Metric_Branch_Mispredict_to_Unknown_Branch_Ratio: name = "Branch_Mispredict_to_Unknown_Branch_Ratio" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Branch_Mispredict_to_Unknown_Branch_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Branch_Mispredict_to_Unknown_Branch_Ratio zero division") desc = """ Ratio between Mispredicted branches and unknown branches""" class Metric_Microcode_Uop_Ratio: name = "Microcode_Uop_Ratio" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Microcode_Uop_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Microcode_Uop_Ratio zero division") desc = """ Percentage of all uops which are ucode ops""" class Metric_FPDiv_Uop_Ratio: name = "FPDiv_Uop_Ratio" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = FPDiv_Uop_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "FPDiv_Uop_Ratio zero division") desc = """ Percentage of all uops which are FPDiv uops""" class Metric_IDiv_Uop_Ratio: name = "IDiv_Uop_Ratio" domain = "" maxval = 0 server = False 
errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = IDiv_Uop_Ratio(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IDiv_Uop_Ratio zero division") desc = """ Percentage of all uops which are IDiv uops""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "" maxval = 0 server = False errcount = 0 area = "Info.System" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_Kernel_Utilization: name = "Kernel_Utilization" domain = "" maxval = 0 server = False errcount = 0 area = "Info.System" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in Kernel mode""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "" maxval = 0 server = False errcount = 0 area = "Info.System" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization""" class Metric_Cycles_per_Demand_Load_L2_Hit: name = "Cycles_per_Demand_Load_L2_Hit" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Cycles_per_Demand_Load_L2_Hit(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cycles_per_Demand_Load_L2_Hit zero division") desc = """ Cycle cost per L2 hit""" class Metric_Cycles_per_Demand_Load_L3_Hit: name = 
"Cycles_per_Demand_Load_L3_Hit" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Cycles_per_Demand_Load_L3_Hit(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cycles_per_Demand_Load_L3_Hit zero division") desc = """ Cycle cost per LLC hit""" class Metric_Cycles_per_Demand_Load_DRAM_Hit: name = "Cycles_per_Demand_Load_DRAM_Hit" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = Cycles_per_Demand_Load_DRAM_Hit(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Cycles_per_Demand_Load_DRAM_Hit zero division") desc = """ Cycle cost per DRAM hit""" class Metric_MemLoadPKI: name = "MemLoadPKI" domain = "" maxval = 0 server = False errcount = 0 area = "Info.Memory" metricgroup = frozenset([]) sibling = None def compute(self, EV): try: self.val = MemLoadPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MemLoadPKI zero division") desc = """ load ops retired per 1000 instruction""" # Schedule class Setup: def __init__(self, r): o = dict() n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Fast_Nuke() ; r.run(n) ; o["Fast_Nuke"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Load_Store_Bound() ; r.run(n) ; o["Load_Store_Bound"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n if use_aux: n = Backend_Bound_Aux() ; r.run(n) ; o["Backend_Bound_Aux"] = n if use_aux: n = Resource_Bound() ; r.run(n) ; o["Resource_Bound"] = n if use_aux: n = Mem_Scheduler() ; r.run(n) 
; o["Mem_Scheduler"] = n if use_aux: n = Non_Mem_Scheduler() ; r.run(n) ; o["Non_Mem_Scheduler"] = n if use_aux: n = Register() ; r.run(n) ; o["Register"] = n if use_aux: n = Reorder_Buffer() ; r.run(n) ; o["Reorder_Buffer"] = n if use_aux: n = Store_Buffer() ; r.run(n) ; o["Store_Buffer"] = n if use_aux: n = Alloc_Restriction() ; r.run(n) ; o["Alloc_Restriction"] = n if use_aux: n = Serialization() ; r.run(n) ; o["Serialization"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = Base() ; r.run(n) ; o["Base"] = n n = FP_uops() ; r.run(n) ; o["FP_uops"] = n n = Other_Ret() ; r.run(n) ; o["Other_Ret"] = n n = MS_uops() ; r.run(n) ; o["MS_uops"] = n # parents o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Fast_Nuke"].parent = o["Machine_Clears"] o["Load_Store_Bound"].parent = o["Backend_Bound"] o["L2_Bound"].parent = o["Load_Store_Bound"] o["L3_Bound"].parent = o["Load_Store_Bound"] o["DRAM_Bound"].parent = o["Load_Store_Bound"] if use_aux: o["Resource_Bound"].parent = o["Backend_Bound_Aux"] if use_aux: o["Mem_Scheduler"].parent = o["Resource_Bound"] if use_aux: o["Non_Mem_Scheduler"].parent = o["Resource_Bound"] if use_aux: o["Register"].parent = o["Resource_Bound"] if use_aux: o["Reorder_Buffer"].parent = o["Resource_Bound"] if use_aux: o["Store_Buffer"].parent = o["Resource_Bound"] if use_aux: o["Alloc_Restriction"].parent = o["Resource_Bound"] if use_aux: o["Serialization"].parent = o["Resource_Bound"] o["Base"].parent = o["Retiring"] o["FP_uops"].parent = o["Base"] o["Other_Ret"].parent = o["Base"] o["MS_uops"].parent = o["Retiring"] # user visible metrics n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_CLKS_P() ; r.metric(n) ; o["CLKS_P"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_UPI() ; r.metric(n) ; o["UPI"] = n n = Metric_Store_Fwd_Blocks() ; r.metric(n) ; 
o["Store_Fwd_Blocks"] = n n = Metric_Address_Alias_Blocks() ; r.metric(n) ; o["Address_Alias_Blocks"] = n n = Metric_Load_Splits() ; r.metric(n) ; o["Load_Splits"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n n = Metric_Branch_Mispredict_Ratio() ; r.metric(n) ; o["Branch_Mispredict_Ratio"] = n n = Metric_Branch_Mispredict_to_Unknown_Branch_Ratio() ; r.metric(n) ; o["Branch_Mispredict_to_Unknown_Branch_Ratio"] = n n = Metric_Microcode_Uop_Ratio() ; r.metric(n) ; o["Microcode_Uop_Ratio"] = n n = Metric_FPDiv_Uop_Ratio() ; r.metric(n) ; o["FPDiv_Uop_Ratio"] = n n = Metric_IDiv_Uop_Ratio() ; r.metric(n) ; o["IDiv_Uop_Ratio"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_Cycles_per_Demand_Load_L2_Hit() ; r.metric(n) ; o["Cycles_per_Demand_Load_L2_Hit"] = n n = Metric_Cycles_per_Demand_Load_L3_Hit() ; r.metric(n) ; o["Cycles_per_Demand_Load_L3_Hit"] = n n = Metric_Cycles_per_Demand_Load_DRAM_Hit() ; r.metric(n) ; o["Cycles_per_Demand_Load_DRAM_Hit"] = n n = Metric_MemLoadPKI() ; r.metric(n) ; o["MemLoadPKI"] = n # references between groups if use_aux: o["Backend_Bound_Aux"].Backend_Bound = o["Backend_Bound"] if use_aux: o["Resource_Bound"].Backend_Bound = o["Backend_Bound"] # siblings cross-tree
40,880
Python
.py
1,221
27.366093
165
0.62374
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,922
hsx_server_ratios.py
andikleen_pmu-tools/hsx_server_ratios.py
# -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel Xeon E5 v3 (code named Haswell EP)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#

# Helpers

# Error sink. The toplev driver rebinds this module attribute to a real
# logger at runtime; the default swallows the message.
# Plain def instead of a lambda assignment (PEP 8 E731) -- same call contract.
def print_error(msg):
    return False

# Runtime configuration; the driver overwrites these before any node is
# computed, the values below are only placeholders.
smt_enabled = False        # True when SMT/Hyper-Threading is enabled
ebs_mode = False
version = "4.8-full-perf"
base_frequency = -1.0
Memory = 0
Average_Frequency = 0.0
num_cores = 1
num_threads = 1
num_sockets = 1

def handle_error(obj, msg):
    """Record a computation failure on a tree node.

    Logs msg, bumps the node's error counter, and zeroes its value and
    threshold flag so a divide error can't leave stale results behind.
    """
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
    obj.thresh = False

def handle_error_metric(obj, msg):
    """Like handle_error, but for metrics, which keep their thresh flag."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0

# Constants (model-specific costs and widths for Haswell EP)
Exe_Ports = 8
Mem_L2_Store_Cost = 9
Mem_L3_Weight = 7
Mem_STLB_Hit_Cost = 8
BAClear_Cost = 12
MS_Switches_Cost = 2
Avg_Assist_Cost = 66
Pipeline_Width = 4
OneMillion = 1000000
OneBillion = 1000000000
Energy_Unit = 61
EBS_Mode = 0
DS = 1

# Aux.
formulas def Backend_Bound_Cycles(self, EV, level): return (STALLS_TOTAL(self, EV, level) + (EV("UOPS_EXECUTED.CORE:c1", level) - Few_Uops_Executed_Threshold(self, EV, level)) / 2 - Frontend_RS_Empty_Cycles(self, EV, level) + EV("RESOURCE_STALLS.SB", level)) if smt_enabled else(STALLS_TOTAL(self, EV, level) + EV("UOPS_EXECUTED.CORE:c1", level) - Few_Uops_Executed_Threshold(self, EV, level) - Frontend_RS_Empty_Cycles(self, EV, level) + EV("RESOURCE_STALLS.SB", level)) def Cycles_0_Ports_Utilized(self, EV, level): return (EV("UOPS_EXECUTED.CORE:i1:c1", level)) / 2 if smt_enabled else(STALLS_TOTAL(self, EV, level) - Frontend_RS_Empty_Cycles(self, EV, level)) def Cycles_1_Port_Utilized(self, EV, level): return (EV("UOPS_EXECUTED.CORE:c1", level) - EV("UOPS_EXECUTED.CORE:c2", level)) / 2 if smt_enabled else(EV("UOPS_EXECUTED.CORE:c1", level) - EV("UOPS_EXECUTED.CORE:c2", level)) def Cycles_2_Ports_Utilized(self, EV, level): return (EV("UOPS_EXECUTED.CORE:c2", level) - EV("UOPS_EXECUTED.CORE:c3", level)) / 2 if smt_enabled else(EV("UOPS_EXECUTED.CORE:c2", level) - EV("UOPS_EXECUTED.CORE:c3", level)) def Cycles_3m_Ports_Utilized(self, EV, level): return (EV("UOPS_EXECUTED.CORE:c3", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.CORE:c3", level) def DurationTimeInSeconds(self, EV, level): return EV("interval-ms", 0) / 1000 def Execute_Cycles(self, EV, level): return (EV("UOPS_EXECUTED.CORE:c1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.CORE:c1", level) def Fetched_Uops(self, EV, level): return (EV("IDQ.DSB_UOPS", level) + EV("LSD.UOPS", level) + EV("IDQ.MITE_UOPS", level) + EV("IDQ.MS_UOPS", level)) def Few_Uops_Executed_Threshold(self, EV, level): EV("UOPS_EXECUTED.CORE:c2", level) EV("UOPS_EXECUTED.CORE:c3", level) return EV("UOPS_EXECUTED.CORE:c3", level) if (IPC(self, EV, level)> 1.8) else EV("UOPS_EXECUTED.CORE:c2", level) def Frontend_RS_Empty_Cycles(self, EV, level): EV("RS_EVENTS.EMPTY_CYCLES", level) return EV("RS_EVENTS.EMPTY_CYCLES", level) if 
(self.Fetch_Latency.compute(EV)> 0.1) else 0 def Frontend_Latency_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", level)) , level ) def HighIPC(self, EV, level): val = IPC(self, EV, level) / Pipeline_Width return val def ITLB_Miss_Cycles(self, EV, level): return (14 * EV("ITLB_MISSES.STLB_HIT", level) + EV("ITLB_MISSES.WALK_DURATION", level)) def LOAD_L1_MISS(self, EV, level): return EV("MEM_LOAD_UOPS_RETIRED.L2_HIT", level) + EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) + EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT", level) + EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM", level) + EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS", level) def LOAD_L1_MISS_NET(self, EV, level): return LOAD_L1_MISS(self, EV, level) + EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM", level) + EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM", level) + EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM", level) + EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD", level) def LOAD_L3_HIT(self, EV, level): return EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level)) def LOAD_LCL_MEM(self, EV, level): return EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level)) def LOAD_RMT_FWD(self, EV, level): return EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level)) def LOAD_RMT_HITM(self, EV, level): return EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level)) def LOAD_RMT_MEM(self, EV, level): return EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level)) def LOAD_XSNP_HIT(self, EV, level): return 
EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level)) def LOAD_XSNP_HITM(self, EV, level): return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level)) def LOAD_XSNP_MISS(self, EV, level): return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level)) def Mem_L3_Hit_Fraction(self, EV, level): return EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) / (EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) + Mem_L3_Weight * EV("MEM_LOAD_UOPS_RETIRED.L3_MISS", level)) def Mem_Lock_St_Fraction(self, EV, level): return EV("MEM_UOPS_RETIRED.LOCK_LOADS", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level) def Memory_Bound_Fraction(self, EV, level): return (STALLS_MEM_ANY(self, EV, level) + EV("RESOURCE_STALLS.SB", level)) / Backend_Bound_Cycles(self, EV, level) def Mispred_Clears_Fraction(self, EV, level): return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level)) def ORO_Demand_RFO_C1(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level ) def ORO_DRD_Any_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level ) def ORO_DRD_BW_Cycles(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c6", level)) , level ) def SQ_Full_Cycles(self, EV, level): return (EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) / 2) if smt_enabled else EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) def STALLS_MEM_ANY(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , 
EV("CYCLE_ACTIVITY.STALLS_LDM_PENDING", level)) , level ) def STALLS_TOTAL(self, EV, level): return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("CYCLE_ACTIVITY.CYCLES_NO_EXECUTE", level)) , level ) def Store_L2_Hit_Cycles(self, EV, level): return EV("L2_RQSTS.RFO_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level)) def Mem_XSNP_HitM_Cost(self, EV, level): return 60 def Mem_XSNP_Hit_Cost(self, EV, level): return 43 def Mem_XSNP_None_Cost(self, EV, level): return 41 def Mem_Local_DRAM_Cost(self, EV, level): return 200 def Mem_Remote_DRAM_Cost(self, EV, level): return 310 def Mem_Remote_HitM_Cost(self, EV, level): return 200 def Mem_Remote_Fwd_Cost(self, EV, level): return 180 def Recovery_Cycles(self, EV, level): return (EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2) if smt_enabled else EV("INT_MISC.RECOVERY_CYCLES", level) def Retire_Fraction(self, EV, level): return Retired_Slots(self, EV, level) / EV("UOPS_ISSUED.ANY", level) # Retired slots per Logical Processor def Retired_Slots(self, EV, level): return EV("UOPS_RETIRED.RETIRE_SLOTS", level) # Number of logical processors (enabled or online) on the target system def Num_CPUs(self, EV, level): return 8 if smt_enabled else 4 # Instructions Per Cycle (per Logical Processor) def IPC(self, EV, level): return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level) # Uops Per Instruction def UopPI(self, EV, level): val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level) self.thresh = (val > 1.05) return val # Uops per taken branch def UpTB(self, EV, level): val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level) self.thresh = val < Pipeline_Width * 1.5 return val # Cycles Per Instruction (per Logical Processor) def CPI(self, EV, level): return 1 / IPC(self, EV, level) # Per-Logical Processor actual clocks when the Logical Processor is active. 
def CLKS(self, EV, level):
    """Unhalted clock ticks of this logical processor."""
    return EV("CPU_CLK_UNHALTED.THREAD", level)

# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
    return Pipeline_Width * CORE_CLKS(self, EV, level)

# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)

# Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)
def ILP(self, EV, level):
    if smt_enabled:
        return EV("UOPS_EXECUTED.CORE", level) / 2 / Execute_Cycles(self, EV, level)
    return EV("UOPS_EXECUTED.CORE", level) / Execute_Cycles(self, EV, level)

# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
    if ebs_mode:
        ratio = 1 + EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_XCLK", level)
        return (EV("CPU_CLK_UNHALTED.THREAD", level) / 2) * ratio
    if smt_enabled:
        return EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2
    return CLKS(self, EV, level)

# Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses.
#Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
    instructions = EV("INST_RETIRED.ANY", level)
    loads = EV("MEM_UOPS_RETIRED.ALL_LOADS", level)
    val = instructions / loads
    self.thresh = (val < 3)
    return val

# Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses.
#Link Opt Guide section: Minimize Register Spills def IpStore(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level) self.thresh = (val < 8) return val # Instructions per Branch (lower number means higher occurrence rate) def IpBranch(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level) self.thresh = (val < 8) return val # Instructions per (near) call (lower number means higher occurrence rate) def IpCall(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level) self.thresh = (val < 200) return val # Instructions per taken branch def IpTB(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level) self.thresh = val < Pipeline_Width * 2 + 1 return val # Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes. def BpTkBranch(self, EV, level): return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level) # Total number of retired Instructions def Instructions(self, EV, level): return EV("INST_RETIRED.ANY", level) # Average number of Uops retired in cycles where at least one uop has retired. def Retire(self, EV, level): return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level) # Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. 
http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html def DSB_Coverage(self, EV, level): val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level) self.thresh = (val < 0.7) and HighIPC(self, EV, 1) return val # Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate) def IpUnknown_Branch(self, EV, level): return Instructions(self, EV, level) / EV("BACLEARS.ANY", level) # Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate) def IpMispredict(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) self.thresh = (val < 200) return val # Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate). def IpMisp_Indirect(self, EV, level): val = Instructions(self, EV, level) / (Retire_Fraction(self, EV, level) * EV("BR_MISP_EXEC.INDIRECT", level)) self.thresh = (val < 1000) return val # Actual Average Latency for L1 data-cache miss demand load operations (in core cycles) def Load_Miss_Real_Latency(self, EV, level): return EV("L1D_PEND_MISS.PENDING", level) / (EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level) + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level)) # Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-Logical Processor) def MLP(self, EV, level): return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level) # L1 cache true misses per kilo instruction for retired demand loads def L1MPKI(self, EV, level): return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level) # L2 cache true misses per kilo instruction for retired demand loads def L2MPKI(self, EV, level): return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level) # Offcore requests (L2 cache miss) per kilo instruction for demand RFOs def L2MPKI_RFO(self, EV, level): return 1000 * EV("OFFCORE_REQUESTS.DEMAND_RFO", level) / EV("INST_RETIRED.ANY", level) # L3 cache true misses per kilo instruction for retired demand loads def L3MPKI(self, EV, level): return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level) def L1D_Cache_Fill_BW(self, EV, level): return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level) def L2_Cache_Fill_BW(self, EV, level): return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level) def L3_Cache_Fill_BW(self, EV, level): return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level) # Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses def Page_Walks_Utilization(self, EV, level): val = (EV("ITLB_MISSES.WALK_DURATION", level) + EV("DTLB_LOAD_MISSES.WALK_DURATION", level) + EV("DTLB_STORE_MISSES.WALK_DURATION", level)) / CORE_CLKS(self, EV, level) self.thresh = (val > 0.5) return val # Average per-core data fill bandwidth to the L1 data cache [GB / sec] def L1D_Cache_Fill_BW_2T(self, EV, level): return L1D_Cache_Fill_BW(self, EV, level) # Average per-core data fill bandwidth to the L2 cache [GB / sec] def L2_Cache_Fill_BW_2T(self, EV, level): return L2_Cache_Fill_BW(self, EV, level) # Average per-core data fill bandwidth to the L3 cache [GB / sec] def 
L3_Cache_Fill_BW_2T(self, EV, level): return L3_Cache_Fill_BW(self, EV, level) # Average Latency for L2 cache miss demand Loads def Load_L2_Miss_Latency(self, EV, level): return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level) # Average Parallel L2 cache miss demand Loads def Load_L2_MLP(self, EV, level): return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", level) # Average Parallel L2 cache miss data reads def Data_L2_MLP(self, EV, level): return EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level) # Average CPU Utilization (percentage) def CPU_Utilization(self, EV, level): return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level) # Average number of utilized CPUs def CPUs_Utilized(self, EV, level): return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0) # Measured Average Core Frequency for unhalted processors [GHz] def Core_Frequency(self, EV, level): return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level) # Measured Average Uncore Frequency for the SoC [GHz] def Uncore_Frequency(self, EV, level): return Socket_CLKS(self, EV, level) / 1e9 / Time(self, EV, level) # Average Frequency Utilization relative nominal frequency def Turbo_Utilization(self, EV, level): return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level) # Fraction of cycles where both hardware Logical Processors were active def SMT_2T_Utilization(self, EV, level): return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / (EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2) if smt_enabled else 0 # Fraction of cycles spent in the Operating System (OS) Kernel mode def Kernel_Utilization(self, EV, level): val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level) self.thresh = (val > 0.05) return val # Cycles Per 
Instruction for the Operating System (OS) Kernel mode def Kernel_CPI(self, EV, level): return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level) # Average external Memory Bandwidth Use for reads and writes [GB / sec] def DRAM_BW_Use(self, EV, level): return (64 *(EV("UNC_M_CAS_COUNT.RD", level) + EV("UNC_M_CAS_COUNT.WR", level)) / OneBillion) / Time(self, EV, level) # Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. def MEM_Read_Latency(self, EV, level): return OneBillion *(EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:opc=0x182", level) / EV("UNC_C_TOR_INSERTS.MISS_OPCODE:opc=0x182", level)) / (Socket_CLKS(self, EV, level) / Time(self, EV, level)) # Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches def MEM_Parallel_Reads(self, EV, level): return EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:opc=0x182", level) / EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:opc=0x182:c1", level) # Run duration time in seconds def Time(self, EV, level): val = EV("interval-s", 0) self.thresh = (val < 1) return val # Socket actual clocks when any core is active on that socket def Socket_CLKS(self, EV, level): return EV("UNC_C_CLOCKTICKS:one_unit", level) # Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate] def IpFarBranch(self, EV, level): val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level) self.thresh = (val < 1000000) return val # Event groups class Frontend_Bound: name = "Frontend_Bound" domain = "Slots" area = "FE" level = 1 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO']) maxval = None def compute(self, EV): try: self.val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.15) except 
ZeroDivisionError: handle_error(self, "Frontend_Bound zero division") return self.val desc = """ This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.""" class Fetch_Latency: name = "Fetch_Latency" domain = "Slots" area = "FE" level = 2 htoff = False sample = ['RS_EVENTS.EMPTY_END'] errcount = 0 sibling = None metricgroup = frozenset(['Frontend', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = Pipeline_Width * Frontend_Latency_Cycles(self, EV, 2) / SLOTS(self, EV, 2) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Fetch_Latency zero division") return self.val desc = """ This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction- cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. 
In such cases; the Frontend eventually delivers no uops for some period."""

# NOTE(review): auto-generated TMA metric classes (toplev framework).
# Code is kept token-identical; only comments were added.  Each class
# exposes compute(EV) which sets self.val (the metric value) and
# self.thresh (True when the node is a flagged bottleneck).  Helpers
# like CLKS()/CORE_CLKS()/handle_error() are defined earlier in this file.

# L3 Frontend-latency node: cycles stalled on i-cache misses
# (ICACHE.IFDATA_STALL normalized by thread clocks).
class ICache_Misses:
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ICACHE.IFDATA_STALL", 3) / CLKS(self, EV, 3)
            # Flagged above 5%, and only when the parent node is flagged.
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.. Using compiler's Profile-Guided Optimization (PGO) can reduce i-cache misses through improved hot code layout."""

# L3 Frontend-latency node: ITLB-miss cycles (via ITLB_Miss_Cycles helper)
# over thread clocks.
class ITLB_Misses:
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['ITLB_MISSES.WALK_COMPLETED']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ITLB_Miss_Cycles(self, EV, 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.. Consider large 2M pages for code (selectively prefer hot large-size function, due to limited 2M entries). Linux options: standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public ations/optimizing-function-placement-for-large-scale-data- center-applications-2/"""

# L3 Frontend-latency node: estimated resteer cycles = fixed BAClear_Cost
# per (mispredicted branch + machine clear + BACLEAR), over clocks.
class Branch_Resteers:
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = BAClear_Cost *(EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) + EV("MACHINE_CLEARS.COUNT", 3) + EV("BACLEARS.ANY", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings."""

# L3 Frontend-latency node (estimated, capped at 1.0): microcode-sequencer
# switch penalty = MS_Switches_Cost * IDQ.MS_SWITCHES / clocks.
class MS_Switches:
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """ This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream).
Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals."""

# L3 Frontend-latency node: Length-Changing-Prefix stall cycles over clocks.
class LCP:
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ILD_STALL.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this."""

# L3 Frontend-latency node: DSB-to-MITE switch penalty cycles over clocks.
class DSB_Switches:
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.. See section 'Optimization for Decoded Icache' in Optimization Manual:.
http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html"""

# L2 Frontend node: bandwidth = Frontend_Bound minus Fetch_Latency
# (derived, no parent gating on threshold).
class Fetch_Bandwidth:
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """ This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend."""

# L3 Frontend-bandwidth node: cycles where MITE delivered some but not the
# full 4 uops, per SMT-aware core clocks.
class MITE:
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.ALL_MITE_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_MITE_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.. Consider tuning codegen of 'small hotspots' that can fit in DSB. Read about 'Decoded ICache' in Optimization Manual:.
http://www.intel.com/content/www/us/en /architecture-and-technology/64-ia-32-architectures- optimization-manual.html"""

# L3 Frontend-bandwidth node: cycles where the DSB delivered some but not
# the full 4 uops, per SMT-aware core clocks.
class DSB:
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.ALL_DSB_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_DSB_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """ This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here."""

# L1 top-level node: slots of issued-but-not-retired uops plus recovery
# slots (Pipeline_Width per recovery cycle), over total slots.
class Bad_Speculation:
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """ This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss- predicted branches are categorized under Bad Speculation category.
Incorrect data speculation followed by Memory Ordering Nukes is another example."""

# L2 node: mispredict share of Bad_Speculation (scaled by the
# mispredicts/clears fraction helper).
class Branch_Mispredicts:
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """ This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.. Using profile feedback in the compiler may help. Please see the Optimization Manual for general strategies for addressing branch misprediction issues.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html"""

# L2 node: machine-clear share = Bad_Speculation minus Branch_Mispredicts.
class Machine_Clears:
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """ This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear.
For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.. See \"Memory Disambiguation\" in Optimization Manual and:. https://software.intel.com/sites/default/files/ m/d/4/1/d/8/sma.pdf"""

# L1 top-level node: remainder after Frontend, Bad Speculation and
# Retiring are accounted for.
class Backend_Bound:
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 1 -(self.Frontend_Bound.compute(EV) + self.Bad_Speculation.compute(EV) + self.Retiring.compute(EV))
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """ This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."""

# L2 node: memory share of Backend_Bound (scaled by the memory-bound
# fraction helper).
class Memory_Bound:
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """ This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions.
This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)."""

# L3 memory node: stalls not attributable to L1D misses
# (total memory stalls minus L1D-pending stalls, clamped at 0).
class L1_Bound:
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L1_HIT:pp', 'MEM_LOAD_UOPS_RETIRED.HIT_LFB:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max((STALLS_MEM_ANY(self, EV, 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_PENDING", 3)) / CLKS(self, EV, 3) , 0 )
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """ This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB.
These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache."""

# L4 node under L1_Bound: load DTLB cost = fixed STLB-hit cost per
# STLB hit plus page-walk duration cycles, over clocks (capped at 1.0).
class DTLB_Load:
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT", 4) + EV("DTLB_LOAD_MISSES.WALK_DURATION", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """ This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss.."""

# L4 node under L1_Bound: 13-cycle penalty per blocked store-forward,
# over clocks (capped at 1.0).
class Store_Fwd_Blk:
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val
    desc = """ This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores.
To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading."""

# L4 node under L1_Bound: locked-load fraction of outstanding demand-RFO
# cycles, over clocks (capped at 1.0).
class Lock_Latency:
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.LOCK_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_Lock_St_Fraction(self, EV, 4) * ORO_Demand_RFO_C1(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val
    desc = """ This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them."""

# L4 node under L1_Bound: average load-miss latency applied to each
# split load (LD_BLOCKS.NO_SR), over clocks (capped at 1.0).
class Split_Loads:
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. . Consider aligning data or hot structure fields.
See the Optimization Manual for more details"""

# L4 node under L1_Bound: 4K-aliasing re-issues over clocks.  Class is
# named G4K_Aliasing because Python identifiers cannot start with a digit;
# the reported metric name stays "4K_Aliasing".
class G4K_Aliasing:
    name = "4K_Aliasing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "G4K_Aliasing zero division")
        return self.val
    desc = """ This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).. Consider reducing independent loads/stores accesses with 4K offsets. See the Optimization Manual for more details"""

# L4 node under L1_Bound: fill-buffer-full pressure; note the threshold
# is NOT gated on the parent (flagged independently above 0.3).
class FB_Full:
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("L1D_PEND_MISS.REQUEST_FB_FULL:c1", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val
    desc = """ This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory).. See $issueBW and $issueSL hints.
Avoid software prefetches if indeed memory BW limited."""

# L3 memory node: L1D-pending minus L2-pending stall cycles, over clocks.
class L2_Bound:
    name = "L2_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L2_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("CYCLE_ACTIVITY.STALLS_L1D_PENDING", 3) - EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L2_Bound zero division")
        return self.val
    desc = """ This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance."""

# L3 memory node: L3-hit share of L2-pending stall cycles, over clocks.
class L3_Bound:
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mem_L3_Hit_Fraction(self, EV, 3) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """ This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e.
L2 misses/L3 hits) can improve the latency and increase performance."""

# L4 node under L3_Bound: weighted HitM + cross-snoop-miss load costs,
# over clocks (capped at 1.0).
class Contested_Accesses:
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_XSNP_HitM_Cost(self, EV, 4) * LOAD_XSNP_HITM(self, EV, 4) + Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_MISS(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing."""

# L4 node under L3_Bound: cross-snoop-hit (clean sharing) load cost,
# over clocks (capped at 1.0).
class Data_Sharing:
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency.
Excessive data sharing can drastically harm multithreaded performance."""

# L4 node under L3_Bound: unshared L3-hit load latency cost, over clocks
# (capped at 1.0).
class L3_Hit_Latency:
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_None_Cost(self, EV, 4) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings."""

# L4 node under L3_Bound: Super-Queue-full cycles per SMT-aware core clocks.
class SQ_Full:
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = SQ_Full_Cycles(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """ This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)."""

# L3 memory node: the L3-miss complement of L3_Bound's share of
# L2-pending stalls (capped at 1.0).
class DRAM_Bound:
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L3_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (1 - Mem_L3_Hit_Fraction(self, EV, 3)) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """ This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance."""

# L4 node under DRAM_Bound: cycles with high outstanding demand data
# reads (bandwidth heuristic), over clocks.
class MEM_Bandwidth:
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that).. Improve data accesses to reduce cacheline transfers from/to memory. Examples: 1) Consume all bytes of a each cacheline before it is evicted (e.g. reorder structure elements and split non-hot ones), 2) merge computed-limited with BW-limited loops, 3) NUMA optimizations in multi-socket system.
Note: software prefetches will not help BW-limited application.."""

# L4 node under DRAM_Bound: any-outstanding-read cycles minus the
# bandwidth component (latency remainder), over clocks.
class MEM_Latency:
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that).. Improve data accesses or interleave them with compute. Examples: 1) Data layout re-structuring, 2) Software Prefetches (also through the compiler).."""

# L5 server node: local-DRAM load cost over clocks (capped at 1.0).
class Local_MEM:
    name = "Local_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_Local_DRAM_Cost(self, EV, 5) * LOAD_LCL_MEM(self, EV, 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Local_MEM zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory.
Caching will improve the latency and increase performance."""

# L5 server node: remote-DRAM load cost over clocks (capped at 1.0).
class Remote_MEM:
    name = "Remote_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_Remote_DRAM_Cost(self, EV, 5) * LOAD_RMT_MEM(self, EV, 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_MEM zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations."""

# L5 server node: weighted remote HitM + remote-forward load costs,
# over clocks (capped at 1.0).
class Remote_Cache:
    name = "Remote_Cache"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM:pp', 'MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore', 'Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_Remote_HitM_Cost(self, EV, 5) * LOAD_RMT_HITM(self, EV, 5) + Mem_Remote_Fwd_Cost(self, EV, 5) * LOAD_RMT_FWD(self, EV, 5)) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_Cache zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues.
This is caused often due to non-optimal NUMA allocations."""

# L3 memory node: store-buffer-full stall cycles over clocks.
class Store_Bound:
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_UOPS_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("RESOURCE_STALLS.SB", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """ This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck."""

# L4 node under Store_Bound: L2-hit store cycles plus the non-locked
# share of outstanding demand-RFO cycles, over clocks (capped at 1.0).
class Store_Latency:
    name = "Store_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Latency zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full).
Consider to avoid/reduce unnecessary (or easily load-able/computable) memory store."""

# L4 node under Store_Bound: weighted remote + cross-core HitM RFO
# responses, over clocks (capped at 1.0).
class False_Sharing:
    name = "False_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM:pp', 'OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE', 'OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_Remote_HitM_Cost(self, EV, 4) * EV("OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM", 4) + Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "False_Sharing zero division")
        return self.val
    desc = """ This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. . False Sharing can be easily avoided by padding to make Logical Processors access different lines."""

# L4 node under Store_Bound: 2 cycles per split store, per SMT-aware
# core clocks.
class Split_Stores:
    name = "Split_Stores"
    domain = "Core_Utilization"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.SPLIT_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 2 * EV("MEM_UOPS_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Stores zero division")
        return self.val
    desc = """ This metric represents rate of split store accesses.
Consider aligning your data to the 64-byte cache line granularity."""

# L4 node under Store_Bound: store DTLB cost = fixed STLB-hit cost per
# STLB hit plus page-walk duration cycles, over clocks (capped at 1.0).
class DTLB_Store:
    name = "DTLB_Store"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT", 4) + EV("DTLB_STORE_MISSES.WALK_DURATION", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Store zero division")
        return self.val
    desc = """ This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently- used data."""

# L2 node: non-memory backend share = Backend_Bound minus Memory_Bound.
class Core_Bound:
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """ This metric represents fraction of slots where Core non- memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)..
Tip: consider Port Saturation analysis as next step."""

# L3 core node: 10 cycles per divider uop, per SMT-aware core clocks
# (capped at 1.0).
class Divider:
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['ARITH.DIVIDER_UOPS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 10 * EV("ARITH.DIVIDER_UOPS", 3) / CORE_CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """ This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication."""

# L3 core node: backend-bound cycles minus store-buffer and memory
# stalls (execution-port pressure remainder), over clocks.
class Ports_Utilization:
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (Backend_Bound_Cycles(self, EV, 3) - EV("RESOURCE_STALLS.SB", 3) - STALLS_MEM_ANY(self, EV, 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """ This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations..
Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop."""

# L4 node under Ports_Utilization: cycles with zero uops executed,
# per SMT-aware core clocks.
class Ports_Utilized_0:
    name = "Ports_Utilized_0"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Cycles_0_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_0 zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.. Check assembly view and Appendix C in Optimization Manual to find out instructions with say 5 or more cycles latency.. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html"""

# L4 node under Ports_Utilization: cycles with exactly 1 uop executed,
# per SMT-aware core clocks.
class Ports_Utilized_1:
    name = "Ports_Utilized_1"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Cycles_1_Port_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_1 zero division")
        return self.val
    desc = """ This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource.
In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful."""

# L4 node under Ports_Utilization: cycles with exactly 2 uops executed,
# per SMT-aware core clocks.
class Ports_Utilized_2:
    name = "Ports_Utilized_2"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Cycles_2_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_2 zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto- Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop."""

# L4 node under Ports_Utilization: cycles with 3 or more uops executed,
# per SMT-aware core clocks.
class Ports_Utilized_3m:
    name = "Ports_Utilized_3m"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Cycles_3m_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.4) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_3m zero division")
        return self.val
    desc = """ This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)."""

# NOTE(review): class continues past the end of this chunk; attribute list
# and the start of compute() reproduced unchanged, body continues below.
class ALU_Op_Utilization:
    name = "ALU_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val
= (EV("UOPS_DISPATCHED_PORT.PORT_0", 5) + EV("UOPS_DISPATCHED_PORT.PORT_1", 5) + EV("UOPS_DISPATCHED_PORT.PORT_5", 5) + EV("UOPS_DISPATCHED_PORT.PORT_6", 5)) / (4 * CORE_CLKS(self, EV, 5)) self.thresh = (self.val > 0.4) except ZeroDivisionError: handle_error(self, "ALU_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.""" class Port_0: name = "Port_0" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_0'] errcount = 0 sibling = None metricgroup = frozenset(['Compute']) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_0", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_0 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ALU and 2nd branch""" class Port_1: name = "Port_1" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_1'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_1", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_1 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)""" class Port_5: name = "Port_5" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_5'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_5", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_5 zero division") return self.val desc = """ This metric represents Core fraction of cycles 
CPU dispatched uops on execution port 5 ALU. See section 'Handling Port 5 Pressure' in Optimization Manual:. http://www.intel.com/content/www/us/en/architecture-and- technology/64-ia-32-architectures-optimization-manual.html""" class Port_6: name = "Port_6" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_6'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_6", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_6 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 Primary Branch and simple ALU""" class Load_Op_Utilization: name = "Load_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = (EV("UOPS_DISPATCHED_PORT.PORT_2", 5) + EV("UOPS_DISPATCHED_PORT.PORT_3", 5) + EV("UOPS_DISPATCHED_PORT.PORT_7", 5) - EV("UOPS_DISPATCHED_PORT.PORT_4", 5)) / (2 * CORE_CLKS(self, EV, 5)) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Load_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations""" class Port_2: name = "Port_2" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_2'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_2", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_2 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 Loads and 
Store-address""" class Port_3: name = "Port_3" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_3'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_3", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_3 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 Loads and Store-address""" class Store_Op_Utilization: name = "Store_Op_Utilization" domain = "Core_Execution" area = "BE/Core" level = 5 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 5) / CORE_CLKS(self, EV, 5) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Store_Op_Utilization zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations""" class Port_4: name = "Port_4" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_4'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Port_4 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)""" class Port_7: name = "Port_7" domain = "Core_Clocks" area = "BE/Core" level = 6 htoff = False sample = ['UOPS_DISPATCHED_PORT.PORT_7'] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = EV("UOPS_DISPATCHED_PORT.PORT_7", 6) / CORE_CLKS(self, EV, 6) self.thresh = (self.val > 0.6) 
except ZeroDivisionError: handle_error(self, "Port_7 zero division") return self.val desc = """ This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 simple Store-address""" class Retiring: name = "Retiring" domain = "Slots" area = "RET" level = 1 htoff = False sample = ['UOPS_RETIRED.RETIRE_SLOTS'] errcount = 0 sibling = None metricgroup = frozenset(['BvUW', 'TmaL1']) maxval = None def compute(self, EV): try: self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1) self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh except ZeroDivisionError: handle_error(self, "Retiring zero division") return self.val desc = """ This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. . A high Retiring value for non-vectorized code may be a good hint for programmer to consider vectorizing his code. 
Doing so essentially lets more computations be done without significantly increasing number of instructions thus improving the performance.""" class Light_Operations: name = "Light_Operations" domain = "Slots" area = "RET" level = 2 htoff = False sample = ['INST_RETIRED.PREC_DIST'] errcount = 0 sibling = None metricgroup = frozenset(['Retire', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV) self.thresh = (self.val > 0.6) except ZeroDivisionError: handle_error(self, "Light_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. . Focus on techniques that reduce instruction count or result in more efficient instructions generation such as vectorization.""" class Heavy_Operations: name = "Heavy_Operations" domain = "Slots" area = "RET" level = 2 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset(['Retire', 'TmaL2']) maxval = None def compute(self, EV): try: self.val = self.Microcode_Sequencer.compute(EV) self.thresh = (self.val > 0.1) except ZeroDivisionError: handle_error(self, "Heavy_Operations zero division") return self.val desc = """ This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. 
This highly-correlates with the uop length of these instructions/sequences.""" class Microcode_Sequencer: name = "Microcode_Sequencer" domain = "Slots" area = "RET" level = 3 htoff = False sample = ['IDQ.MS_UOPS'] errcount = 0 sibling = None metricgroup = frozenset(['MicroSeq']) maxval = None def compute(self, EV): try: self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3) self.thresh = (self.val > 0.05) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Microcode_Sequencer zero division") return self.val desc = """ This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided..""" class Assists: name = "Assists" domain = "Slots_Estimated" area = "RET" level = 4 htoff = False sample = ['OTHER_ASSISTS.ANY_WB_ASSIST'] errcount = 0 sibling = None metricgroup = frozenset(['BvIO']) maxval = 1.0 def compute(self, EV): try: self.val = Avg_Assist_Cost * EV("OTHER_ASSISTS.ANY_WB_ASSIST", 4) / SLOTS(self, EV, 4) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "Assists zero division") return self.val desc = """ This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. 
Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases.""" class CISC: name = "CISC" domain = "Slots" area = "RET" level = 4 htoff = False sample = [] errcount = 0 sibling = None metricgroup = frozenset([]) maxval = None def compute(self, EV): try: self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV)) self.thresh = (self.val > 0.1) and self.parent.thresh except ZeroDivisionError: handle_error(self, "CISC zero division") return self.val desc = """ This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.""" class Metric_IPC: name = "IPC" domain = "Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Ret', 'Summary']) sibling = None def compute(self, EV): try: self.val = IPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IPC zero division") desc = """ Instructions Per Cycle (per Logical Processor)""" class Metric_UopPI: name = "UopPI" domain = "Metric" maxval = 2.0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Ret', 'Retire']) sibling = None def compute(self, EV): try: self.val = UopPI(self, EV, 0) self.thresh = (self.val > 1.05) except ZeroDivisionError: handle_error_metric(self, "UopPI zero division") desc = """ Uops Per Instruction""" class Metric_UpTB: name = "UpTB" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = UpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 1.5 except 
ZeroDivisionError: handle_error_metric(self, "UpTB zero division") desc = """ Uops per taken branch""" class Metric_CPI: name = "CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline', 'Mem']) sibling = None def compute(self, EV): try: self.val = CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPI zero division") desc = """ Cycles Per Instruction (per Logical Processor)""" class Metric_CLKS: name = "CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['Pipeline']) sibling = None def compute(self, EV): try: self.val = CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CLKS zero division") desc = """ Per-Logical Processor actual clocks when the Logical Processor is active.""" class Metric_SLOTS: name = "SLOTS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Thread" metricgroup = frozenset(['TmaL1']) sibling = None def compute(self, EV): try: self.val = SLOTS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SLOTS zero division") desc = """ Total issue-pipeline slots (per-Physical Core till ICL; per- Logical Processor ICL onward)""" class Metric_CoreIPC: name = "CoreIPC" domain = "Core_Metric" maxval = Pipeline_Width + 2 errcount = 0 area = "Info.Core" metricgroup = frozenset(['Ret', 'SMT', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = CoreIPC(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CoreIPC zero division") desc = """ Instructions Per Cycle across hyper-threads (per physical core)""" class Metric_ILP: name = "ILP" domain = "Metric" maxval = Exe_Ports errcount = 0 area = "Info.Core" metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil']) sibling = None def compute(self, EV): try: self.val = ILP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "ILP zero 
division") desc = """ Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical- processor)""" class Metric_CORE_CLKS: name = "CORE_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.Core" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = CORE_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CORE_CLKS zero division") desc = """ Core actual clocks when any Logical Processor is active on the Physical Core""" class Metric_IpLoad: name = "IpLoad" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpLoad(self, EV, 0) self.thresh = (self.val < 3) except ZeroDivisionError: handle_error_metric(self, "IpLoad zero division") desc = """ Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses.""" class Metric_IpStore: name = "IpStore" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['InsType']) sibling = None def compute(self, EV): try: self.val = IpStore(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpStore zero division") desc = """ Instructions per Store (lower number means higher occurrence rate). 
Tip: reduce memory accesses.""" class Metric_IpBranch: name = "IpBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'InsType']) sibling = None def compute(self, EV): try: self.val = IpBranch(self, EV, 0) self.thresh = (self.val < 8) except ZeroDivisionError: handle_error_metric(self, "IpBranch zero division") desc = """ Instructions per Branch (lower number means higher occurrence rate)""" class Metric_IpCall: name = "IpCall" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpCall(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpCall zero division") desc = """ Instructions per (near) call (lower number means higher occurrence rate)""" class Metric_IpTB: name = "IpTB" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO']) sibling = None def compute(self, EV): try: self.val = IpTB(self, EV, 0) self.thresh = self.val < Pipeline_Width * 2 + 1 except ZeroDivisionError: handle_error_metric(self, "IpTB zero division") desc = """ Instructions per taken branch""" class Metric_BpTkBranch: name = "BpTkBranch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Branches', 'Fed', 'PGO']) sibling = None def compute(self, EV): try: self.val = BpTkBranch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "BpTkBranch zero division") desc = """ Branch instructions per taken branch. . 
Can be used to approximate PGO-likelihood for non-loopy codes.""" class Metric_Instructions: name = "Instructions" domain = "Count" maxval = 0 errcount = 0 area = "Info.Inst_Mix" metricgroup = frozenset(['Summary', 'TmaL1']) sibling = None def compute(self, EV): try: self.val = Instructions(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Instructions zero division") desc = """ Total number of retired Instructions""" class Metric_Retire: name = "Retire" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Pipeline" metricgroup = frozenset(['Pipeline', 'Ret']) sibling = None def compute(self, EV): try: self.val = Retire(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Retire zero division") desc = """ Average number of Uops retired in cycles where at least one uop has retired.""" class Metric_DSB_Coverage: name = "DSB_Coverage" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['DSB', 'Fed', 'FetchBW']) sibling = None def compute(self, EV): try: self.val = DSB_Coverage(self, EV, 0) self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1) except ZeroDivisionError: handle_error_metric(self, "DSB_Coverage zero division") desc = """ Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. 
http://www.intel.com/content/www/us/en/architecture- and-technology/64-ia-32-architectures-optimization- manual.html""" class Metric_IpUnknown_Branch: name = "IpUnknown_Branch" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Frontend" metricgroup = frozenset(['Fed']) sibling = None def compute(self, EV): try: self.val = IpUnknown_Branch(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "IpUnknown_Branch zero division") desc = """ Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)""" class Metric_IpMispredict: name = "IpMispredict" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMispredict(self, EV, 0) self.thresh = (self.val < 200) except ZeroDivisionError: handle_error_metric(self, "IpMispredict zero division") desc = """ Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)""" class Metric_IpMisp_Indirect: name = "IpMisp_Indirect" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.Bad_Spec" metricgroup = frozenset(['Bad', 'BrMispredicts']) sibling = None def compute(self, EV): try: self.val = IpMisp_Indirect(self, EV, 0) self.thresh = (self.val < 1000) except ZeroDivisionError: handle_error_metric(self, "IpMisp_Indirect zero division") desc = """ Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).""" class Metric_Load_Miss_Real_Latency: name = "Load_Miss_Real_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat']) sibling = None def compute(self, EV): try: self.val = Load_Miss_Real_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_Miss_Real_Latency zero 
division") desc = """ Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)""" class Metric_MLP: name = "MLP" domain = "Metric" maxval = 10.0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MLP zero division") desc = """ Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)""" class Metric_L1MPKI: name = "L1MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheHits', 'Mem']) sibling = None def compute(self, EV): try: self.val = L1MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1MPKI zero division") desc = """ L1 cache true misses per kilo instruction for retired demand loads""" class Metric_L2MPKI: name = "L2MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'Backend', 'CacheHits']) sibling = None def compute(self, EV): try: self.val = L2MPKI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI zero division") desc = """ L2 cache true misses per kilo instruction for retired demand loads""" class Metric_L2MPKI_RFO: name = "L2MPKI_RFO" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['CacheMisses', 'Offcore']) sibling = None def compute(self, EV): try: self.val = L2MPKI_RFO(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2MPKI_RFO zero division") desc = """ Offcore requests (L2 cache miss) per kilo instruction for demand RFOs""" class Metric_L3MPKI: name = "L3MPKI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem']) sibling = None def compute(self, EV): try: self.val = L3MPKI(self, EV, 
0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3MPKI zero division") desc = """ L3 cache true misses per kilo instruction for retired demand loads""" class Metric_L1D_Cache_Fill_BW: name = "L1D_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW zero division") desc = """ """ class Metric_L2_Cache_Fill_BW: name = "L2_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW zero division") desc = """ """ class Metric_L3_Cache_Fill_BW: name = "L3_Cache_Fill_BW" domain = "Metric" maxval = 0 errcount = 0 area = "Info.Memory" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW zero division") desc = """ """ class Metric_Page_Walks_Utilization: name = "Page_Walks_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.Memory.TLB" metricgroup = frozenset(['Mem', 'MemoryTLB']) sibling = None def compute(self, EV): try: self.val = Page_Walks_Utilization(self, EV, 0) self.thresh = (self.val > 0.5) except ZeroDivisionError: handle_error_metric(self, "Page_Walks_Utilization zero division") desc = """ Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses""" class Metric_L1D_Cache_Fill_BW_2T: name = "L1D_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) 
sibling = None def compute(self, EV): try: self.val = L1D_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L1 data cache [GB / sec]""" class Metric_L2_Cache_Fill_BW_2T: name = "L2_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L2_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L2 cache [GB / sec]""" class Metric_L3_Cache_Fill_BW_2T: name = "L3_Cache_Fill_BW_2T" domain = "Core_Metric" maxval = 0 errcount = 0 area = "Info.Memory.Core" metricgroup = frozenset(['Mem', 'MemoryBW']) sibling = None def compute(self, EV): try: self.val = L3_Cache_Fill_BW_2T(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division") desc = """ Average per-core data fill bandwidth to the L3 cache [GB / sec]""" class Metric_Load_L2_Miss_Latency: name = "Load_L2_Miss_Latency" domain = "Clocks_Latency" maxval = 1000 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_Lat', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_Miss_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_Miss_Latency zero division") desc = """ Average Latency for L2 cache miss demand Loads""" class Metric_Load_L2_MLP: name = "Load_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Load_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Load_L2_MLP zero division") desc = 
""" Average Parallel L2 cache miss demand Loads""" class Metric_Data_L2_MLP: name = "Data_L2_MLP" domain = "Metric" maxval = 100 errcount = 0 area = "Info.Memory.Latency" metricgroup = frozenset(['Memory_BW', 'Offcore']) sibling = None def compute(self, EV): try: self.val = Data_L2_MLP(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Data_L2_MLP zero division") desc = """ Average Parallel L2 cache miss data reads""" class Metric_CPU_Utilization: name = "CPU_Utilization" domain = "Metric" maxval = 1 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'Summary']) sibling = None def compute(self, EV): try: self.val = CPU_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPU_Utilization zero division") desc = """ Average CPU Utilization (percentage)""" class Metric_CPUs_Utilized: name = "CPUs_Utilized" domain = "Metric" maxval = 300 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = CPUs_Utilized(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "CPUs_Utilized zero division") desc = """ Average number of utilized CPUs""" class Metric_Core_Frequency: name = "Core_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary', 'Power']) sibling = None def compute(self, EV): try: self.val = Core_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Core_Frequency zero division") desc = """ Measured Average Core Frequency for unhalted processors [GHz]""" class Metric_Uncore_Frequency: name = "Uncore_Frequency" domain = "SystemMetric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Uncore_Frequency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Uncore_Frequency 
zero division") desc = """ Measured Average Uncore Frequency for the SoC [GHz]""" class Metric_Turbo_Utilization: name = "Turbo_Utilization" domain = "Core_Metric" maxval = 10.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Power']) sibling = None def compute(self, EV): try: self.val = Turbo_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Turbo_Utilization zero division") desc = """ Average Frequency Utilization relative nominal frequency""" class Metric_SMT_2T_Utilization: name = "SMT_2T_Utilization" domain = "Core_Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SMT']) sibling = None def compute(self, EV): try: self.val = SMT_2T_Utilization(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "SMT_2T_Utilization zero division") desc = """ Fraction of cycles where both hardware Logical Processors were active""" class Metric_Kernel_Utilization: name = "Kernel_Utilization" domain = "Metric" maxval = 1.0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_Utilization(self, EV, 0) self.thresh = (self.val > 0.05) except ZeroDivisionError: handle_error_metric(self, "Kernel_Utilization zero division") desc = """ Fraction of cycles spent in the Operating System (OS) Kernel mode""" class Metric_Kernel_CPI: name = "Kernel_CPI" domain = "Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['OS']) sibling = None def compute(self, EV): try: self.val = Kernel_CPI(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Kernel_CPI zero division") desc = """ Cycles Per Instruction for the Operating System (OS) Kernel mode""" class Metric_DRAM_BW_Use: name = "DRAM_BW_Use" domain = "GB/sec" maxval = 200 errcount = 0 area = "Info.System" metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC']) sibling = None def 
compute(self, EV): try: self.val = DRAM_BW_Use(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "DRAM_BW_Use zero division") desc = """ Average external Memory Bandwidth Use for reads and writes [GB / sec]""" class Metric_MEM_Read_Latency: name = "MEM_Read_Latency" domain = "NanoSeconds" maxval = 1000 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryLat', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Read_Latency(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Read_Latency zero division") desc = """ Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches.""" class Metric_MEM_Parallel_Reads: name = "MEM_Parallel_Reads" domain = "SystemMetric" maxval = 100 errcount = 0 area = "Info.System" metricgroup = frozenset(['Mem', 'MemoryBW', 'SoC']) sibling = None def compute(self, EV): try: self.val = MEM_Parallel_Reads(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "MEM_Parallel_Reads zero division") desc = """ Average number of parallel data read requests to external memory. 
Accounts for demand loads and L1/L2 prefetches""" class Metric_Time: name = "Time" domain = "Seconds" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Summary']) sibling = None def compute(self, EV): try: self.val = Time(self, EV, 0) self.thresh = (self.val < 1) except ZeroDivisionError: handle_error_metric(self, "Time zero division") desc = """ Run duration time in seconds""" class Metric_Socket_CLKS: name = "Socket_CLKS" domain = "Count" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['SoC']) sibling = None def compute(self, EV): try: self.val = Socket_CLKS(self, EV, 0) self.thresh = True except ZeroDivisionError: handle_error_metric(self, "Socket_CLKS zero division") desc = """ Socket actual clocks when any core is active on that socket""" class Metric_IpFarBranch: name = "IpFarBranch" domain = "Inst_Metric" maxval = 0 errcount = 0 area = "Info.System" metricgroup = frozenset(['Branches', 'OS']) sibling = None def compute(self, EV): try: self.val = IpFarBranch(self, EV, 0) self.thresh = (self.val < 1000000) except ZeroDivisionError: handle_error_metric(self, "IpFarBranch zero division") desc = """ Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]""" # Schedule class Setup: def __init__(self, r): o = dict() n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n n = LCP() ; r.run(n) ; o["LCP"] = n n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n n = MITE() ; r.run(n) ; o["MITE"] = n n = DSB() ; r.run(n) ; o["DSB"] = n n = Bad_Speculation() ; r.run(n) ; 
o["Bad_Speculation"] = n n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n n = FB_Full() ; r.run(n) ; o["FB_Full"] = n n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n n = Local_MEM() ; r.run(n) ; o["Local_MEM"] = n n = Remote_MEM() ; r.run(n) ; o["Remote_MEM"] = n n = Remote_Cache() ; r.run(n) ; o["Remote_Cache"] = n n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n n = Divider() ; r.run(n) ; o["Divider"] = n n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n n = Port_0() ; 
r.run(n) ; o["Port_0"] = n n = Port_1() ; r.run(n) ; o["Port_1"] = n n = Port_5() ; r.run(n) ; o["Port_5"] = n n = Port_6() ; r.run(n) ; o["Port_6"] = n n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n n = Port_2() ; r.run(n) ; o["Port_2"] = n n = Port_3() ; r.run(n) ; o["Port_3"] = n n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n n = Port_4() ; r.run(n) ; o["Port_4"] = n n = Port_7() ; r.run(n) ; o["Port_7"] = n n = Retiring() ; r.run(n) ; o["Retiring"] = n n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n n = Assists() ; r.run(n) ; o["Assists"] = n n = CISC() ; r.run(n) ; o["CISC"] = n # parents o["Fetch_Latency"].parent = o["Frontend_Bound"] o["ICache_Misses"].parent = o["Fetch_Latency"] o["ITLB_Misses"].parent = o["Fetch_Latency"] o["Branch_Resteers"].parent = o["Fetch_Latency"] o["MS_Switches"].parent = o["Fetch_Latency"] o["LCP"].parent = o["Fetch_Latency"] o["DSB_Switches"].parent = o["Fetch_Latency"] o["Fetch_Bandwidth"].parent = o["Frontend_Bound"] o["MITE"].parent = o["Fetch_Bandwidth"] o["DSB"].parent = o["Fetch_Bandwidth"] o["Branch_Mispredicts"].parent = o["Bad_Speculation"] o["Machine_Clears"].parent = o["Bad_Speculation"] o["Memory_Bound"].parent = o["Backend_Bound"] o["L1_Bound"].parent = o["Memory_Bound"] o["DTLB_Load"].parent = o["L1_Bound"] o["Store_Fwd_Blk"].parent = o["L1_Bound"] o["Lock_Latency"].parent = o["L1_Bound"] o["Split_Loads"].parent = o["L1_Bound"] o["G4K_Aliasing"].parent = o["L1_Bound"] o["FB_Full"].parent = o["L1_Bound"] o["L2_Bound"].parent = o["Memory_Bound"] o["L3_Bound"].parent = o["Memory_Bound"] o["Contested_Accesses"].parent = o["L3_Bound"] o["Data_Sharing"].parent = o["L3_Bound"] o["L3_Hit_Latency"].parent = o["L3_Bound"] o["SQ_Full"].parent = o["L3_Bound"] o["DRAM_Bound"].parent = o["Memory_Bound"] o["MEM_Bandwidth"].parent = o["DRAM_Bound"] 
o["MEM_Latency"].parent = o["DRAM_Bound"] o["Local_MEM"].parent = o["MEM_Latency"] o["Remote_MEM"].parent = o["MEM_Latency"] o["Remote_Cache"].parent = o["MEM_Latency"] o["Store_Bound"].parent = o["Memory_Bound"] o["Store_Latency"].parent = o["Store_Bound"] o["False_Sharing"].parent = o["Store_Bound"] o["Split_Stores"].parent = o["Store_Bound"] o["DTLB_Store"].parent = o["Store_Bound"] o["Core_Bound"].parent = o["Backend_Bound"] o["Divider"].parent = o["Core_Bound"] o["Ports_Utilization"].parent = o["Core_Bound"] o["Ports_Utilized_0"].parent = o["Ports_Utilization"] o["Ports_Utilized_1"].parent = o["Ports_Utilization"] o["Ports_Utilized_2"].parent = o["Ports_Utilization"] o["Ports_Utilized_3m"].parent = o["Ports_Utilization"] o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_0"].parent = o["ALU_Op_Utilization"] o["Port_1"].parent = o["ALU_Op_Utilization"] o["Port_5"].parent = o["ALU_Op_Utilization"] o["Port_6"].parent = o["ALU_Op_Utilization"] o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_2"].parent = o["Load_Op_Utilization"] o["Port_3"].parent = o["Load_Op_Utilization"] o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"] o["Port_4"].parent = o["Store_Op_Utilization"] o["Port_7"].parent = o["Store_Op_Utilization"] o["Light_Operations"].parent = o["Retiring"] o["Heavy_Operations"].parent = o["Retiring"] o["Microcode_Sequencer"].parent = o["Heavy_Operations"] o["Assists"].parent = o["Microcode_Sequencer"] o["CISC"].parent = o["Microcode_Sequencer"] # user visible metrics n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n n = Metric_IpLoad() 
; r.metric(n) ; o["IpLoad"] = n n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n n = 
Metric_Uncore_Frequency() ; r.metric(n) ; o["Uncore_Frequency"] = n n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n n = Metric_Time() ; r.metric(n) ; o["Time"] = n n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n # references between groups o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"] o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"] o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"] o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"] o["Backend_Bound"].Retiring = o["Retiring"] o["Backend_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Retiring = o["Retiring"] o["Memory_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Memory_Bound"].Backend_Bound = o["Backend_Bound"] o["Memory_Bound"].Fetch_Latency = o["Fetch_Latency"] o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"] o["Core_Bound"].Retiring = o["Retiring"] o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"] o["Core_Bound"].Memory_Bound = o["Memory_Bound"] o["Core_Bound"].Backend_Bound = o["Backend_Bound"] o["Core_Bound"].Bad_Speculation = o["Bad_Speculation"] o["Core_Bound"].Fetch_Latency = o["Fetch_Latency"] o["Ports_Utilization"].Fetch_Latency = o["Fetch_Latency"] o["Ports_Utilized_0"].Fetch_Latency = o["Fetch_Latency"] o["Retiring"].Heavy_Operations = 
o["Heavy_Operations"] o["Light_Operations"].Retiring = o["Retiring"] o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"] o["Light_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] o["Heavy_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"] o["CISC"].Assists = o["Assists"] # siblings cross-tree o["MS_Switches"].sibling = (o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],) o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],) o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],) o["Machine_Clears"].sibling = (o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"], o["Microcode_Sequencer"],) o["L1_Bound"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],) o["DTLB_Load"].sibling = (o["DTLB_Store"],) o["Lock_Latency"].sibling = (o["Store_Latency"],) o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"],) o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"],) o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Remote_Cache"], o["False_Sharing"],) o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],) o["L3_Hit_Latency"].overlap = True o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],) o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],) o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],) o["Remote_Cache"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"],) o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],) o["Store_Latency"].overlap = True o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"],) o["Split_Stores"].sibling = (o["Port_4"],) o["DTLB_Store"].sibling = 
(o["DTLB_Load"],) o["Ports_Utilized_1"].sibling = (o["L1_Bound"],) o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"],) o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["Port_6"],) o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["Port_6"],) o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"],) o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"],) o["Port_4"].sibling = (o["Split_Stores"],) o["Microcode_Sequencer"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],) o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],) o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
123,862
Python
.py
3,072
34.670247
423
0.657931
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,923
pmumon.py
andikleen_pmu-tools/pmumon.py
#!/usr/bin/env python3 # standalone simple pmu configuration tool # allows to count an even without using perf # will conflict with any parallel perf (and other profiler) # usage. # Author: Andi Kleen # from __future__ import print_function import os import struct import sys def writemsr(msr, val, cpu): f = os.open('/dev/cpu/%d/msr' % (cpu,), os.O_WRONLY) os.lseek(f, msr, os.SEEK_SET) os.write(f, struct.pack('Q', val)) os.close(f) def readmsr(msr, cpu): f = os.open('/dev/cpu/%d/msr' % (cpu,), os.O_RDONLY) os.lseek(f, msr, os.SEEK_SET) val = struct.unpack('Q', os.read(f, 8))[0] os.close(f) return val if len(sys.argv) != 3 and len(sys.argv) != 2: print("Usage: pmumon cpu [event]") print("When no event is specified read+clear event on cpu, otherwise start it") print("event == 0 clears. event is in hex") print("perf/oprofile/etc. must not be active. no parallel users") sys.exit(1) MSR_EVNTSEL = 0x186 + 1 MSR_PERFCTR = 0xc1 + 1 cpu = int(sys.argv[1]) if len(sys.argv) > 2: event = int(sys.argv[2], 16) writemsr(MSR_EVNTSEL, 0, cpu) # disable first writemsr(MSR_PERFCTR, 0, cpu) writemsr(MSR_EVNTSEL, event, cpu) #print("global status %x" % (readmsr(0x38f, cpu),)) else: print("%x = %d" % (readmsr(MSR_EVNTSEL, cpu), readmsr(MSR_PERFCTR, cpu),))
1,339
Python
.py
39
31.153846
83
0.664093
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,924
pmudef.py
andikleen_pmu-tools/pmudef.py
EVENTSEL_EVENT = 0x00ff EVENTSEL_UMASK = 0xff00 EVENTSEL_EDGE = 1<<18 EVENTSEL_PC = 1<<19 EVENTSEL_ANY = 1<<21 EVENTSEL_INV = 1<<23 EVENTSEL_CMASK = 0xff000000 EVMASK = (EVENTSEL_EVENT | EVENTSEL_UMASK | EVENTSEL_EDGE | EVENTSEL_PC | EVENTSEL_ANY | EVENTSEL_INV | EVENTSEL_CMASK) EVENTSEL_ENABLE = 1<<22 MSR_EVNTSEL = 0x186 MSR_IA32_FIXED_CTR_CTRL = 0x38d MSR_PEBS_ENABLE = 0x3f1 MSR_PERFCTR = 0xc1 MSR_PMC = 0x4c1 MSR_FIXED_CTR = 0x309 MSR_FIXED_CTR_CTL = 0x38d MSR_GLOBAL_STATUS = 0x38e MSR_GLOBAL_CTRL = 0x38f MSR_GLOBAL_OVF_CTRL = 0x390 extra_flags = ( (EVENTSEL_EDGE, "edge"), (EVENTSEL_PC, "pc"), (EVENTSEL_ANY, "any"), (EVENTSEL_INV, "inv"), (EVENTSEL_CMASK, "cmask"))
738
Python
.py
26
25.307692
88
0.682203
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,925
event-translate.py
andikleen_pmu-tools/event-translate.py
#!/usr/bin/env python3 # translate raw events to names # event-translate rXXX ... from __future__ import print_function import re import sys import ocperf from pmudef import EVMASK, EVENTSEL_EVENT, EVENTSEL_UMASK, extra_flags emap = ocperf.find_emap() if not emap: sys.exit("Unknown CPU or cannot find event table") for j in sys.argv[1:]: m = re.match(r'r([0-9a-f]+)(:.*)?', j) if m: print(m.group(1)) evsel = int(m.group(1), 16) print("%s:" % (j)) if evsel & EVMASK in emap.codes: print(emap.codes[evsel & EVMASK].name) elif (evsel & (EVENTSEL_EVENT|EVENTSEL_UMASK)) in emap.codes: print(emap.codes[evsel & (EVENTSEL_EVENT|EVENTSEL_UMASK)].name, end='') for k in extra_flags: if evsel & k[0]: m = k[0] en = evsel while (m & 1) == 0: m >>= 1 en >>= 1 print("%s=%d" % (k[1], en & m),end='') print() else: print("cannot find", m.group(1)) else: # XXX implement offcore new style events print("cannot parse", j)
1,194
Python
.py
35
24.971429
83
0.525043
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,926
plot-normalized.py
andikleen_pmu-tools/plot-normalized.py
#!/usr/bin/env python3 # plot already normalized data # first column is time stamp import sys import argparse import os import csv import matplotlib if os.getenv("DISPLAY") is None: matplotlib.use('Agg') import matplotlib.pyplot as plt ap = argparse.ArgumentParser(usage='Plot already normalized CSV data') ap.add_argument('--output', '-o', help='Output to file. Otherwise show.', nargs='?') ap.add_argument('inf', nargs='?', default=sys.stdin, type=argparse.FileType('r'), help='input CSV file') args = ap.parse_args() inf = args.inf rc = csv.reader(inf) num = 0 timestamps = [] columns = {} for r in rc: num += 1 if num == 1: for j in r[1:]: columns[j] = [] continue timestamps.append(r[0]) c = 1 for j in columns: try: columns[j].append(float(r[c])) except ValueError: columns[j].append(float('nan')) c += 1 for j in columns: plt.plot(timestamps, columns[j], label=j) leg = plt.legend() leg.get_frame().set_alpha(0.5) if args.output: plt.savefig(args.output) else: plt.show()
1,129
Python
.py
44
21.25
81
0.637627
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,927
node.py
andikleen_pmu-tools/node.py
# Helper classes and functions for nodes # Decorator class to declare reference dependecies between classes class requires(object): """Decorator to mark required references. These references will be added to the object as instance attributes. Example: @requires("ref1", "ref2") class SomeClass(object): def some_method(self): return self.ref1 + self.ref2 """ def __init__(self, *required_refs): self.required_refs = required_refs def __call__(self, cls): setattr(cls, "required_refs", self.required_refs) return cls def set_parent(parent, nodes): for node in nodes: node.parent = parent # Check that all required references are set def check_refs(fn): """Decorator to check if required references for an object are set. If it finds missing references, it will raise an exception. Example: @requires("retiring", "bad_speculation", "frontend_bound") class BackendBound(object): @check_refs def _compute(self, ev): # checks if required refs are set before executing """ def wrapped(self, *args, **kwargs): if not hasattr(self, "required_refs"): raise Exception("Missing required_refs object") missing_refs = [ref for ref in self.required_refs if not hasattr(self, ref)] if missing_refs: raise Exception("Missing references: {0}".format(missing_refs)) return fn(self, *args, **kwargs) wrapped.__name__ = fn.__name__ return wrapped def add_references(node, **refs): """Adds an attribute to node, as specified in the **refs argument. Example: ... backend = BackendBound() add_references(backend, retiring=retiring, frontend_bound=frontend, bad_speculation=bad_speculation) """ for name, obj in refs.items(): setattr(node, name, obj)
1,923
Python
.py
49
32.204082
75
0.656099
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,928
perf_metrics.py
andikleen_pmu-tools/perf_metrics.py
class CPU_Utilization: name = "CPU utilization" desc = """ Number of CPUs used. The top down CPU metrics are only meaningful when a CPU thread is executing. The percentage are always relative to the executing time. When the utilization is low the workload may actually not be CPU bound, but IO (network, block) IO bound instead. Check the scheduler and IO metrics below. Or it may be CPU bound, but not use enough parallelism, if the number of CPUs is less than the number of cores.""" nogroup = True subplot = "CPU Utilization" unit = "CPUs" def compute(self, EV): try: # interval-ns is not a perf event, but handled by toplev internally. self.val = (EV("task-clock", 1) * 1e6) / EV("interval-ns", 1) except ZeroDivisionError: self.val = 0 class MUX: name = "MUX" desc = """ PerfMon Event Multiplexing accuracy indicator""" unit = "%" maxval = 100.0 errcount = 0 def compute(self, EV): self.val = EV("mux", 0) self.thresh = 0 < self.val < 100.0 class Setup: def __init__(self, r): #r.force_metric(CPU_Utilization()) r.force_metric(MUX())
1,179
Python
.py
33
30.515152
80
0.657043
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,929
slm_ratios.py
andikleen_pmu-tools/slm_ratios.py
# # Silvermont top level model # Can be collected without multiplexing # Please see http://ark.intel.com for more details on these CPUs. # from __future__ import print_function import metrics import node print_error = lambda msg: False version = "1.0" # Override using set_clks_event_name() CLKS_EVENT_NAME = "CPU_CLK_UNHALTED.CORE" # Module-level function used to work around event name differences, # e.g. Knights Landing def set_clks_event_name(ev_name): global CLKS_EVENT_NAME CLKS_EVENT_NAME = ev_name # Instructions Per Cycle def IPC(EV, level): return EV("INST_RETIRED.ANY", level) / EV("cycles", 1) # Average Frequency Utilization relative nominal frequency def TurboUtilization(EV, level): return EV("cycles", level) / EV("CPU_CLK_UNHALTED.REF_TSC", level) def DurationTimeInSeconds(EV, level): return EV("interval-ns", 0) / 1e+06 / 1000 # Run duration time in seconds def Time(EV, level): return DurationTimeInSeconds(EV, level) # Per-thread actual clocks def CLKS(EV, level): return EV(CLKS_EVENT_NAME, level) # Cycles Per Instruction (threaded) def CPI(EV, level): return 1 / IPC(EV, level) def icache_line_fetch_cost(ev, level): return ev("FETCH_STALL.ICACHE_FILL_PENDING_CYCLES", level) / \ CLKS(ev, level) def predecode_wrong_cost(ev, level): return (ev("DECODE_RESTRICTION.PREDECODE_WRONG", level) * 3 / CLKS(ev, level)) def ba_clears_cost(ev, level): return ev("BACLEARS.ALL", level) * 5 / CLKS(ev, level) def ms_entry_cost(ev, level): return ev("MS_DECODED.MS_ENTRY", level) * 5 / CLKS(ev, level) def itlb_misses_cost(ev, level): return ev("PAGE_WALKS.I_SIDE_CYCLES", level) / CLKS(ev, level) # LEVEL 0, user-visible metrics" class CyclesPerUop(metrics.MetricBase): name = "CyclesPerUop" domain = "Metric" desc = "\nCycles per uop." 
def _compute(self, ev): return ev(CLKS_EVENT_NAME, self.level) / \ ev("UOPS_RETIRED.ALL", self.level) # LEVEL 1 class FrontendBound(metrics.FrontendBound): def _compute(self, ev): return ev("NO_ALLOC_CYCLES.NOT_DELIVERED", 1) / CLKS(ev, self.level) @node.requires("retiring", "bad_speculation", "frontend") class BackendBound(metrics.BackendBound): @node.check_refs def _compute(self, ev): return 1 - (self.retiring.compute(ev) + self.bad_speculation.compute(ev) + self.frontend.compute(ev)) class BadSpeculation(metrics.BadSpeculation): def _compute(self, ev): return ev("NO_ALLOC_CYCLES.MISPREDICTS", 1) / CLKS(ev, self.level) class Retiring(metrics.Retiring): def _compute(self, ev): return ev("UOPS_RETIRED.ALL", 1) / (2 * CLKS(ev, self.level)) # LEVEL 2 @node.requires("icache_misses", "itlb", "ms_cost", "frontend") class FrontendLatency(metrics.FrontendLatency): @node.check_refs def _compute(self, ev): return (self.icache_misses.compute(ev) + self.itlb.compute(ev) + self.ms_cost.compute(ev) + ba_clears_cost(ev, self.level) ) / CLKS(ev, self.level) # LEVEL 3 class ICacheMisses(metrics.ICacheMisses): def _compute(self, ev): return (icache_line_fetch_cost(ev, self.level) + predecode_wrong_cost(ev, self.level)) class ITLBMisses(metrics.ITLBMisses): def _compute(self, ev): return itlb_misses_cost(ev, self.level) class MSSwitches(metrics.MSSwitches): def _compute(self, ev): return ms_entry_cost(ev, self.level) class Metric_IPC: name = "IPC" desc = """ Instructions Per Cycle""" def compute(self, EV): try: self.val = IPC(EV, 0) except ZeroDivisionError: print("IPC zero division") self.val = 0 class Metric_TurboUtilization: name = "TurboUtilization" desc = """ Average Frequency Utilization relative nominal frequency""" def compute(self, EV): try: self.val = TurboUtilization(EV, 0) except ZeroDivisionError: print("TurboUtilization zero division") self.val = 0 class Metric_CLKS: name = "CLKS" desc = """ Per-thread actual clocks""" domain = "Count" maxval = 0 errcount = 0 def 
compute(self, EV): try: self.val = CLKS(EV, 0) except ZeroDivisionError: print_error("CLKS zero division") self.errcount += 1 self.val = 0 class Metric_Time: name = "Time" desc = """ Run duration time in seconds""" domain = "Count" maxval = 0 errcount = 0 def compute(self, EV): try: self.val = Time(EV, 0) except ZeroDivisionError: print_error("Time zero division") self.errcount += 1 self.val = 0 class Metric_CPI: name = "CPI" desc = """ Cycles Per Instruction (threaded)""" domain = "Metric" maxval = 0 errcount = 0 def compute(self, EV): try: self.val = CPI(EV, 0) except ZeroDivisionError: print_error("CPI zero division") self.errcount += 1 self.val = 0 class Setup: def __init__(self, runner): # Instantiate nodes as required to be able to specify their # references # L3 objects icache_misses = ICacheMisses() itlb_misses = ITLBMisses() ms_cost = MSSwitches() #L1 objects frontend = FrontendBound() bad_speculation = BadSpeculation() retiring = Retiring() backend = BackendBound(retiring=retiring, bad_speculation=bad_speculation, frontend=frontend) # L2 objects frontend_latency = FrontendLatency(icache_misses=icache_misses, itlb=itlb_misses, ms_cost=ms_cost, frontend=frontend ) # Set parents node.set_parent(None, [frontend, bad_speculation, retiring, backend]) node.set_parent(frontend, [frontend_latency]) node.set_parent(frontend_latency, [icache_misses, itlb_misses, ms_cost]) # User visible metrics user_metrics = [Metric_IPC(), Metric_CPI(), Metric_TurboUtilization(), Metric_CLKS(), Metric_Time(), CyclesPerUop()] nodes = [obj for obj in locals().values() if issubclass(obj.__class__, metrics.MetricBase) and obj.level > 0] nodes = sorted(nodes, key=lambda n: n.level) # Pass to runner list(map(runner.run, nodes)) list(map(runner.metric, user_metrics))
6,771
Python
.py
188
28.356383
78
0.620642
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,930
latego.py
andikleen_pmu-tools/latego.py
#!/usr/bin/env python3 # configure latego workaround on Sandy Bridge EP # can be run as a standalone tool or used as module # latego enable|disable hexevent from __future__ import print_function import signal import struct import re import os import msr import pci busses = (0x3f, 0x7f, 0xbf, 0xff) def local_direct2core(val): c = 0 for b in busses: if pci.probe(b, 14, 0): pci.changebit(b, 14, 0, 0x84, 1, val) c += 1 if c == 0: print("no local devices found") def remote_direct2core(val): c = 0 for b in busses: if pci.probe(b, 8, 0): pci.changebit(b, 8, 0, 0x80, 1, val) pci.changebit(b, 9, 0, 0x80, 1, val) c += 1 if c == 0: print("no remote devices found") def direct2core(val): # make sure all cores are awake when we do that f = os.open("/dev/cpu_dma_latency", os.O_WRONLY) os.write(f, struct.pack("I", 0)) local_direct2core(val) remote_direct2core(val) os.close(f) def set_bypass(val): msr.changebit(0x39c, 0, val) bypass = 1 << 0 d2c = 1 << 1 latego_events = { 0x04d1: bypass, 0x20d1: bypass|d2c, 0x01d3: bypass|d2c, 0x04d3: bypass|d2c, 0x01d2: bypass, 0x02d2: bypass, 0x04d2: bypass, 0x08d2: bypass, 0x01cd: bypass|d2c } latego_names = { "mem_load_uops_retired.llc_hit": 0x04d1, "mem_load_uops_retired.llc_miss": 0x20d1, "mem_load_uops_llc_miss_retired.local_dram": 0x01d3, "mem_load_uops_llc_miss_retired.remote_dram": 0x04d3, "mem_load_uops_llc_hit_retired.xsnp_miss": 0x01d2, "mem_load_uops_llc_hit_retired.xsnp_hit": 0x02d2, "mem_load_uops_llc_hit_retired.xsnp_hitm": 0x04d2, "mem_load_uops_llc_hit_retired.xsnp_none": 0x08d2, "mem_trans_retired.load_latency": 0x01cd } signal_setup = False enabled = 0 def cleanup(): if enabled & bypass: set_bypass(0) if enabled & d2c: direct2core(0) def get_event(e): if re.match(r"[0-9]+", e): return int(e, 16) if e in latego_names: return latego_names[e] return e def setup_event(event, val): global signal_setup global enabled action = ("Disabling", "Enabling")[val] if val and not signal_setup: signal.signal(signal.SIGINT, cleanup) 
signal.signal(signal.SIGPIPE, cleanup) signal_setup = True if event in latego_events: v = latego_events[event] if v & d2c: print("%s direct2core" % (action)) direct2core(val) if v & bypass: print("%s bypass" % (action)) set_bypass(val) if val: enabled = v else: enabled = 0 if __name__ == '__main__': import sys if len(sys.argv) == 3 and sys.argv[1] == "enable": setup_event(get_event(sys.argv[2]), 1) elif len(sys.argv) == 3 and sys.argv[1] == "disable": setup_event(get_event(sys.argv[2]), 0) elif len(sys.argv) == 2 and sys.argv[1] == "list": print("%-45s %04s" % ("name", "hex")) for i in latego_names.keys(): print("%-45s %04x" % (i, latego_names[i], )) else: print("Usage: latego enable|disable hexevent|namedevent") print(" latego list") sys.exit(1)
3,280
Python
.py
109
24.284404
65
0.607154
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,931
ocperf.py
andikleen_pmu-tools/ocperf.py
#!/usr/bin/env python3 # Copyright (c) 2011-2020, Intel Corporation # Author: Andi Kleen # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # wrapper for perf for using named events and events with additional MSRs. # syntax is like perf, except Intel events are listed and can be specified # # or library for other python program to convert intel event names to # perf/raw format # # Features: # - map intel events to raw perf events # - enable disable workarounds for specific events # - resolve uncore events # - handle offcore event on older kernels # For the later must run as root and only as a single instance per machine # Normal events (mainly not OFFCORE) can be handled unprivileged # For events you can specify additional intel names from the list # # env variables: # PERF=... perf binary to use (default "perf") # EVENTMAP=eventmap # EVENTMAP2=eventmap # EVENTMAP3=eventmap # OFFCORE=eventmap # UNCORE=eventmap # UNCORE2=eventmap # eventmap is a path name to a json file. can contain wildcards. # When eventmap is not specified, look in ~/.cache/pmu-events/ # The eventmap is automatically downloaded there # eventmap can be also a CPU identifer (GenuineIntel-FAMILY-MODEL, like GenuineIntel-06-37) # (Note that the numbers are in upper case hex) # # TOPOLOGY=topologyfile # topologyfile is a dump of the sysfs of another system (find /sys > file) # Needed for uncore units. This is useful to generate perf command lines for other systems. 
# # OCVERBOSE=1 print which files are opened # # Special arguments: # --no-period Never add a period # --print only print # --force-download Force event list download # --experimental Support experimental events # --noexplode Don't list all sub pmus for uncore events. Rely on perf stat to merge. from __future__ import print_function import sys import os import subprocess import json import re import copy import textwrap if sys.version_info.major == 2: from pipes import quote else: from shlex import quote import itertools import glob from pmudef import EVENTSEL_ANY, EVENTSEL_INV, EVMASK, extra_flags if sys.version_info.major == 3: import typing # noqa from typing import Set, List, Dict, Any, Tuple, DefaultDict # noqa import msr as msrmod import latego import event_download force_download = False experimental = False ocverbose = os.getenv("OCVERBOSE") is not None exists_cache = dict() # type: Dict[str,bool] emap_list = [] # type: List[EmapNativeJSON] topology = None # type: None | Set[str] def file_exists(s): if s in exists_cache: return exists_cache[s] global topology if topology is None: top = os.getenv("TOPOLOGY") topology = set() if top: try: topology = {x.strip() for x in open(top).readlines()} except OSError: print("Cannot open topology", top, file=sys.stderr) if s in topology: return True found = os.path.exists(s) exists_cache[s] = found return found def has_format(s, pmu): return file_exists("/sys/devices/%s/format/%s" % (pmu, s)) def has_format_any(f, pmu): return has_format(f, pmu) or has_format(f, pmu + "_0") warned = set() def warn_once(s): if s not in warned: print(s, file=sys.stderr) warned.add(s) class PerfVersion(object): def __init__(self): minor = 0 perf = os.getenv("PERF") if not perf: perf = "perf" try: version = subprocess.Popen([perf, "--version"], stdout=subprocess.PIPE).communicate()[0] except OSError: print("Cannot run", perf) version = "" if not isinstance(version, str): version = version.decode('utf-8') m = re.match(r"perf version 
(\d+)\.(\d+)\.", version) version = 412 # assume that no match is new enough if m: major = int(m.group(1)) minor = int(m.group(2)) version = major * 100 + minor pmu = "cpu_core" if os.path.exists("/sys/devices/cpu_core") else "cpu" self.direct = os.getenv("DIRECT_MSR") or version < 400 self.offcore = has_format("offcore_rsp", pmu) and not self.direct self.ldlat = has_format("ldlat", pmu) and not self.direct self.has_name = version >= 304 self.has_uncore_expansion = version >= 412 version = PerfVersion() class MSR(object): def __init__(self): self.reg = {} def writemsr(self, msrnum, val, print_only = False): print("msr %x = %x" % (msrnum, val, )) if print_only: return msrmod.writemsr(msrnum, val) def checked_writemsr(self, msr, val, print_only = False): if msr in self.reg: sys.exit("Multiple events use same register") self.writemsr(msr, val, print_only) self.reg[msr] = 1 qual_map = ( ("amt1", "any=1", EVENTSEL_ANY, ""), ("percore", "percore=1", 0, ""), ("perf_metrics", "", 0, ""), ("i1", "inv=1", EVENTSEL_INV, ""), ("e1", "edge=1", 0, ""), ("e0", "edge=0", 0, ""), ("tx", "in_tx=1", 0, ""), ("sup", "", 0, "k"), ("usr=yes", "", 0, "u"), ("usr=no", "", 0, "k"), ("os=yes", "", 0, "k"), ("os=no", "", 0, "u"), ("anythr=yes", "any=1", 0, ""), ("anythr=no", "any=0", 0, ""), ("pdir", "", 0, "ppp"), ("precise=yes", "", 0, "pp"), ("cp", "in_tx_cp=1", 0, "")) number = "(0x[0-9a-fA-F]+|[0-9]+)" qualval_map = ( (r"event_select=" + number, "event=%#x", 0), (r"u" + number, "umask=%#x", 0), (r"c(?:mask=)?" + number, "cmask=%d", 24), (r"e(?:dge=)?" + number, "edge=%d", 18), (r"(?:sa|sample-after|period)=" + number, "period=%d", 0)) uncore_map = ( (r'e(\d)', 'edge='), (r't=(\d+)', "thresh="), (r'match=(0x[0-9a-fA-F]+)', "filter_occ="), (r'filter1=(0x[0-9a-fA-F]+)', "config1=", 32), (r"nc=(\d+)", "filter_nc="), (r'filter=(0x[0-9a-fA-F]+)', "config1="), (r'one_unit', '', ), (r"u" + number, "umask="), (r"opc=?" + number, "filter_opc="), (r"tid=?" + number, "filter_tid="), (r"state=?" 
+ number, "filter_state=")) uncore_map_thresh = ( (r"c(?:mask=)?" + number, "thresh="),) uncore_map_cmask = ( (r"c(?:mask=)?" + number, "cmask="),) # newe gets modified def convert_extra(extra, val, newe): nextra = "" while extra: if extra[0] == ":": extra = extra[1:] continue found = False for j in qualval_map: m = re.match(j[0], extra, re.I) if m: if j[2]: val |= int(m.group(1), 0) << j[2] newe.append(j[1] % (int(m.group(1), 0))) extra = extra[len(m.group(0)):] found = True break if found: continue found = False for j in qual_map: if extra.lower().startswith(j[0]): val |= j[2] newe.append(j[1]) extra = extra[len(j[0]):] nextra += j[3] found = True break if found: continue if not extra: break if extra[0] in perf_qual + "p": nextra += extra[0] extra = extra[1:] continue print("bad event qualifier", extra, file=sys.stderr) break return nextra, val def gen_name(n, sup): n = n.replace(".", "_").replace(":", "_").replace("=", "_") if sup: n += "_k" return n class Event(object): def __init__(self, name, val, desc): self.val = val self.name = name self.extra = "" self.userextra = "" self.msr = 0 self.msrval = 0 self.desc = desc self.precise = 0 self.collectpebs = 0 self.newextra = "" self.overflow = None self.errata = None self.counter = "" self.period = 0 self.pname = None # XXX return with pmu to be consistent with Uncore and fix callers def output_newstyle(self, extra="", noname=False, period=False, name="", noexplode=False): """Format an perf event for output and return as perf event string. 
Always uses new style (cpu/.../).""" val = self.val if extra: extra = self.newextra + "," + extra else: extra = self.newextra if self.pname: e = self.pname else: e = "event=0x%x,umask=0x%x" % (val & 0xff, (val >> 8) & 0xff) e += extra if version.has_name: if name: e += ",name=" + name elif not noname: e += ",name=%s" % (gen_name(self.name, "sup" in (self.extra + extra))) if period and self.period and ",period=" not in e: e += ",period=%d" % self.period return e def output(self, use_raw=False, flags="", noname=False, period=False, name="", noexplode=False): """Format an event for output and return as perf event string. use_raw when true return old style perf string (rXXX). Otherwise chose between old and new style based on the capabilities of the installed perf executable. flags when set add perf flags (e.g. u for user, p for pebs).""" val = self.val newe = [] extra = "".join(sorted(merge_extra(extra_set(self.extra), extra_set(flags)))) extra, val = convert_extra(":" + extra, val, newe) if version.direct or use_raw: if self.pname: ename = self.pname else: ename = "r%x" % (val,) if extra: ename += ":" + extra # XXX should error for extras that don't fit into raw else: p = self.output_newstyle(extra=",".join(newe), noname=noname, period=period, name=name) ename = "%s/%s/" % (self.pmu, p) + extra return ename def filter_qual(self): def check_qual(q): if q == "": return True if "=" in q: q, _ = q.split("=") if has_format_any(q, self.pmu): return True warn_once("%s: format %s not supported. Filtering out" % (self. 
pmu, q)) return False self.newextra = ",".join(filter(check_qual, self.newextra.split(","))) box_to_perf = { "cbo": "cbox", "qpi_ll": "qpi", "sbo": "sbox", } def box_exists(box): return file_exists("/sys/devices/uncore_%s" % (box)) def int_or_zero(row, name): if name in row: if row[name] == 'False': return 0 if row[name] == 'True': return 1 return int(row[name]) return 0 uncore_units = { "imph-u": "arb", "kti ll": "upi", "m3kti": "m3upi", "upi ll": "upi", } def convert_uncore(flags, extra_map): o = "" while flags: for j in uncore_map + extra_map: if flags[0] == ",": flags = flags[1:] match, repl = j[0], j[1] m = re.match(match, flags, re.I) if m: if repl == "": pass elif len(j) > 2: o += "," + repl + ("%#x" % (int(m.group(1), 0) << j[2])) else: o += "," + repl + m.group(1) flags = flags[m.end():] if flags == "": break if flags[0:1] == ":": flags = flags[1:] else: if flags != "": if len(extra_map) > 0: print("Uncore cannot parse", flags, file=sys.stderr) break return o class UncoreEvent(object): def __init__(self, name, row): self.name = name e = self if 'PublicDescription' in row: e.desc = row['PublicDescription'].strip() elif 'BriefDescription' in row: e.desc = row['BriefDescription'].strip() else: e.desc = row['Description'].strip() e.code = int(row['EventCode'], 16) if 'Internal' in row and int(row['Internal']) != 0: e.code |= int(row['Internal']) << 21 e.umask = int(row['UMask'], 16) e.cmask = int_or_zero(row, 'CounterMask') e.inv = int_or_zero(row, 'Invert') e.edge = int_or_zero(row, 'EdgeDetect') e.unit = row['Unit'].lower() if e.unit in uncore_units: e.unit = uncore_units[e.unit] if e.unit == "ncu": e.unit = "clock" if box_exists("clock") else "cbox" e.umask = 0 e.code = 0xff # xxx subctr if e.unit in box_to_perf: e.unit = box_to_perf[e.unit] e.msr = None e.overflow = 0 e.counter = "1" # dummy for toplev e.newextra = "" if 'Errata' in row: e.errata = row['Errata'] else: e.errata = None self.extra = '' self.userextra = '' # { # "Unit": "CBO", # "EventCode": 
"0x22", # "UMask": "0x21", # "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EXTERNAL", # "Description": "An external snoop misses in some processor core.", # "Counter": "0,1", # "CounterMask": "0", # "Invert": "0", # "EdgeDetect": "0" # }, # XXX cannot separate sockets # extra: perf flags # flags: emon flags def output_newstyle(self, newextra="", noname=False, period=False, name="", flags="", noexplode=False): e = self o = "/event=%#x" % e.code if e.umask: o += ",umask=%#x" % e.umask if e.cmask: o += ",cmask=%#x" % e.cmask if e.edge: o += ",edge=1" if e.inv: o += ",inv=1" if e.newextra: if flags: flags += "," flags += e.newextra one_unit = "one_unit" in flags if has_format_any("cmask", "uncore_" + e.unit): extra_map = uncore_map_cmask else: extra_map = uncore_map_thresh o += convert_uncore(flags, extra_map) # xxx subctr, occ_sel, filters if version.has_name and not noname: if name == "": name = gen_name(e.name, False) o += ",name=" + name + "_NUM" if newextra: o += "," + ",".join(newextra) o += "/" # explode boxes if needed def box_name(n): return "%s_%d" % (e.unit, n) def box_n_exists(n): if one_unit and n > 0: return False return box_exists(box_name(n)) if not noexplode and not box_exists(e.unit) and box_n_exists(0): return ",".join(["uncore_" + box_name(x) + o.replace("_NUM", "_%d" % (x)) for x in itertools.takewhile(box_n_exists, itertools.count())]) return "uncore_%s%s" % (e.unit, o.replace("_NUM", "")) def filter_qual(self): def check_qual(q): if q == "": return False if q == "one_unit": return True if "=" in q: q, _ = q.split("=") if has_format_any(q, "uncore_" + self.unit): return True warn_once("%s: format %s not supported. 
Filtering out" % (self.unit, q)) return False self.newextra = ",".join(filter(check_qual, convert_uncore(self.newextra, ()).split(","))) output = output_newstyle def ffs(flag): assert flag != 0 m = 1 j = 0 while (flag & m) == 0: m = m << 1 j += 1 return j perf_qual = "kuhGHSD" # without pebs def extra_set(e): return set(map(lambda x: x[0], re.findall(r"(" + "|".join([x[0] for x in qual_map + qualval_map + uncore_map]) + "|[" + perf_qual + "]|p+)", e))) def merge_extra(a, b): m = a | b if 'ppp' in m: m = m - set(['p', 'pp']) if 'pp' in m: m = m - set(['p']) m = m - set([':']) return m def print_event(name, desc, f, human, wrap, pmu=""): desc = "".join([y for y in desc if y < chr(127)]) print(" %-42s" % (name,), end='', file=f) if pmu: print(" [%s]" % pmu, end='', file=f) if human: print("\n%s" % (wrap.fill(desc),), file=f) else: print(" [%s]" % (desc,), file=f) def uncore_exists(box, postfix=""): if file_exists("/sys/devices/uncore_" + box + postfix): return True if file_exists("/sys/devices/uncore_" + box + "_0" + postfix): return True return False missing_boxes = set() def check_uncore_event(e, extramsg): if uncore_exists(e.unit): if e.cmask and not uncore_exists(e.unit, "/format/cmask"): warn_once("Uncore unit " + e.unit + " missing cmask for " + e.name) extramsg.append("not supported due to missing cmask in PMU") return None if e.umask and not uncore_exists(e.unit, "/format/umask"): warn_once("Uncore unit " + e.unit + " missing umask for " + e.name) extramsg.append("not supported due to missing umask in PMU") return None return e if e.unit not in missing_boxes: warn_once("Uncore unit " + e.unit + " missing") missing_boxes.add(e.unit) extramsg.append("not supported due to missing PMU") return None fixed_counters = { "inst_retired.any": (0xc0, 0, 0), "cpu_clk_unhalted.thread": (0x3c, 0, 0), "cpu_clk_unhalted.thread_any": (0x3c, 0, 1), "cpu_clk_unhalted.core": (0x3c, 0, 1), } def update_ename(ev, name): if ev: ev = copy.deepcopy(ev) ev.name = name return ev def 
json_open(name): if ocverbose: print("open", name, file=sys.stderr) d = open(name, "rb").read() if not isinstance(d, str): d = d.decode('utf-8') json_data = json.loads(d) if isinstance(json_data, dict) and 'Events' in json_data: json_data = json_data['Events'] return json_data class EmapNativeJSON(object): """Read an event table.""" def __init__(self, name, pmu): self.events = {} self.perf_events = {} self.codes = {} self.desc = {} self.pevents = {} self.latego = False self.uncore_events = {} self.error = False self.pmu = pmu self.read_events(name) def add_event(self, e): self.events[e.name] = e self.perf_events[e.name.replace('.', '_')] = e # workaround for perf-style naming if e.pname: self.pevents[e.pname] = e self.codes[e.val] = e self.desc[e.name] = e.desc e.pmu = self.pmu def read_table(self, r): for row in r: def get(x): return row[x] def gethex(x): return int(get(x).split(",")[0], 16) def getdec(x): return int(get(x), 10) name = get(u'EventName').lower().rstrip() try: code = gethex(u'EventCode') umask = gethex(u'UMask') except ValueError: if ocverbose: print("cannot parse event", name) continue anyf = 0 if name in fixed_counters: code, umask, anyf = fixed_counters[name] if u'Other' in row: other = gethex(u'Other') << 16 else: other = 0 other |= gethex(u'EdgeDetect') << 18 if u'AnyThread' in row: other |= (gethex(u'AnyThread') | anyf) << 21 other |= getdec(u'CounterMask') << 24 other |= gethex(u'Invert') << 23 val = code | (umask << 8) | other val &= EVMASK d = get(u'PublicDescription') if d is None: d = '' d = d.strip() e = Event(name, val, d) e.newextra = "" if other & ((1 << 16)|(1 << 17)): if other & (1<<16): e.extra += "u" if other & (1 << 17): e.extra += "k" e.perfqual = None if u'MSRIndex' in row and get(u'MSRIndex') and get(u'MSRValue'): msrnum = gethex(u'MSRIndex') msrval = gethex(u'MSRValue') if version.offcore and msrnum in (0x1a6, 0x1a7): e.newextra = ",offcore_rsp=0x%x" % (msrval, ) e.perfqual = "offcore_rsp" elif version.ldlat and msrnum in 
(0x3f6,): e.newextra = ",ldlat=0x%x" % (msrval, ) e.perfqual = "ldlat" elif msrnum == 0x3f7: e.newextra = ",frontend=%#x" % (msrval, ) e.perfqual = "frontend" # add new msr here else: e.msrval = msrval e.msr = msrnum if u'SampleAfterValue' in row: e.overflow = get(u'SampleAfterValue') e.counter = get(u'Counter') e.precise = getdec(u'Precise') if u'Precise' in row else 0 e.collectpebs = getdec(u'CollectPEBS') if u'CollectPEBS' in row else 0 e.pebs = getdec(u'PEBS') if u'PEBS' in row else 0 if e.collectpebs > 1 or e.pebs >= 2: e.extra += "pp" if len(e.counter.split(",")) == 1: e.extra += "p" try: if get(u'Errata') != "null": try: d += " Errata: " d += get(u'Errata') e.errata = get(u'Errata') except UnicodeDecodeError: pass except KeyError: pass e.desc = d for (flag, name) in extra_flags: if val & flag: e.newextra += ",%s=%d" % (name, (val & flag) >> ffs(flag), ) e.period = int(get(u'SampleAfterValue')) if u'SampleAfterValue' in row else 0 self.add_event(e) def getevent(self, e, nocheck=False, extramsg=[]): """Retrieve an event with name e. Return Event object or None. When nocheck is set don't check against current system.""" e = e.lower() extra = "" edelim = "" m = re.match(r'([^:]+):request=([^:]+):response=([^:]+)', e) if m: ename = m.group(1) + "." + m.group(2) + "." + m.group(3) return update_ename(self.getevent(ename, nocheck=nocheck, extramsg=extramsg), e) m = re.match(r'(.*?):(.*)', e) if m: extra = m.group(2) edelim = ":" e = m.group(1) if e in self.events: # hack for now. 
Avoid ambiguity with :p # Should handle qualmap properly here extra = extra.replace("period=", "sample-after=") userextra = extra extra = extra_set(extra) ev = self.events[e] ev_extra = extra_set(ev.extra) if extra and merge_extra(ev_extra, extra) > ev_extra: ev = copy.deepcopy(self.events[e]) ev.userextra = userextra ev.extra = "".join(sorted(merge_extra(ev_extra, extra))) return ev return self.events[e] elif e.endswith("_ps"): return update_ename(self.getevent(e[:-3] + ":p" + extra), e) elif e.startswith("offcore") and (e + "_0") in self.events: return update_ename(self.getevent(e + "_0" + edelim + extra), e) elif e in self.uncore_events: ev = self.uncore_events[e] if ev and not nocheck: ev = check_uncore_event(ev, extramsg) if ev and extra: ev = copy.deepcopy(ev) ev.newextra = extra return ev elif e in self.perf_events: return self.perf_events[e] elif e in self.pevents: return self.pevents[e] extramsg.append("event not found for %s" % self.pmu) return None # XXX need to handle exploded events def update_event(self, e, ev): if e not in self.pevents: self.pevents[e] = ev def getraw(self, r): e = "r%x" % (r) if e in self.pevents: ev = self.pevents[e] s = ev.name if ev.extra: s += ":" + ev.extra return s return "!Raw 0x%x" % (r,) def getperf(self, p): if p in self.pevents: e = self.pevents[p] n = e.name if e.userextra: n += ":" + e.userextra return n return p def dumpevents(self, f=sys.stdout, human=True, uncore=True): """Print all events with descriptions to the file descriptor f. 
When human is true word wrap all the descriptions.""" wrap = None if human: wrap = textwrap.TextWrapper(initial_indent=" ", subsequent_indent=" ") for k in sorted(self.events.keys()): print_event(k, self.desc[k], f, human, wrap, self.pmu) if uncore: for k in sorted(self.uncore_events.keys()): print_event(k, self.uncore_events[k].desc, f, human, wrap) def read_events(self, name): """Read JSON normal events table.""" if name.find("JKT") >= 0 or name.find("Jaketown") >= 0: self.latego = True try: data = json_open(name) except ValueError as e: print("Cannot parse", name + ":", e.message, file=sys.stderr) self.error = True return self.read_table(data) if "topdown.slots" in self.events: self.add_topdown() def add_offcore(self, name): """Read offcore table.""" data = json_open(name) # { # "MATRIX_REQUEST": "DEMAND_DATA_RD", # "MATRIX_RESPONSE": "NULL", # "MATRIX_VALUE": "0x0000000001", # "MATRIX_REGISTER": "0,1", # "DESCRIPTION": "Counts demand data reads that" # }, offcore_response = self.getevent("OFFCORE_RESPONSE") if not offcore_response: return requests = [] responses = [] for row in data: if row[u"MATRIX_REQUEST"].upper() != "NULL": requests.append((row[u"MATRIX_REQUEST"], row[u"MATRIX_VALUE"], row[u"DESCRIPTION"])) if row[u"MATRIX_RESPONSE"].upper() != "NULL": responses.append((row[u"MATRIX_RESPONSE"], row[u"MATRIX_VALUE"], row[u"DESCRIPTION"])) def create_event(req_name, req_val, req_desc, res_name, res_val, res_desc): oe = copy.deepcopy(offcore_response) oe.name = ("OFFCORE_RESPONSE.%s.%s" % (req_name, res_name)).lower() if oe.name.lower() in self.events: return oe.msrval = int(req_val, 16) | (int(res_val, 16) << 16) oe.desc = req_desc + " " + res_desc if version.offcore: oe.newextra = ",offcore_rsp=0x%x" % (oe.msrval, ) else: oe.msr = 0x1a6 self.add_event(oe) for a, b in itertools.product(requests, responses): create_event(*(a + b)) def add_uncore(self, name, force=False): data = json_open(name) for row in data: name = row['EventName'].lower() try: 
self.uncore_events[name] = UncoreEvent(name, row) except UnicodeEncodeError: pass def add_topdown(self): def td_event(name, pname, desc, counter): e = Event(name, 0, desc) e.counter = counter e.pname = pname self.add_event(e) td_event("perf_metrics.retiring", "topdown-retiring", "Number of slots the pipeline was frontend bound.", "32") td_event("perf_metrics.bad_speculation", "topdown-bad-spec", "Number of slots the pipeline was doing bad speculation.", "33") td_event("perf_metrics.frontend_bound", "topdown-fe-bound", "Number of slots the pipeline was frontend bound.", "34") td_event("perf_metrics.backend_bound", "topdown-be-bound", "Number of slots the pipeline was backend bound.", "35") td_event("topdown.slots", "slots", "Number of slots", "36") if "topdown.memory_bound_slots" in self.events: td_event("perf_metrics.heavy_operations", "topdown-heavy-ops", "Number of slots pipeline retired microcode instructions with >2 uops", "36") td_event("perf_metrics.branch_mispredicts", "topdown-br-mispredict", "Number of slots frontend was bound by branch mispredictions", "37") td_event("perf_metrics.memory_bound", "topdown-mem-bound", "Number of slots backend was bound by memory", "38") td_event("perf_metrics.fetch_latency", "topdown-fetch-lat", "Number of slots frontend was bound by memory fetch latency", "39") pmu_to_type = { "cpu_core": ("hybridcore", "Core"), "cpu_atom": ("hybridcore", "Atom"), "cpu": ("core", None), } def json_with_extra(el, eventmap_is_file, pmu): typ = pmu_to_type[pmu] name = event_download.eventlist_name(el, key=typ[0], hybridkey=typ[1]) if not os.path.exists(name): if pmu == "cpu_core": name = event_download.eventlist_name(el, "core") else: name = event_download.eventlist_name(el, "hybridcore", hybridkey="Core") emap = EmapNativeJSON(name, pmu) if not emap or emap.error: print("parsing", name, "failed", file=sys.stderr) return None if experimental: try: emap.read_events(event_download.eventlist_name(el, "core experimental")) except IOError: pass 
add_extra_env(emap, el, eventmap_is_file) return emap def add_extra_env(emap, el, eventmap_is_file): try: oc = os.getenv("OFFCORE") if oc: oc = canon_emapvar(oc, "matrix") oc = event_download.eventlist_name(el, "offcore") emap.add_offcore(oc) elif not eventmap_is_file: oc = event_download.eventlist_name(el, "offcore") if os.path.exists(oc) and el != oc: emap.add_offcore(oc) if experimental: oc = event_download.eventlist_name(el, "offcore experimental") if os.path.exists(oc) and oc != el: emap.add_offcore(oc) except IOError: print("Cannot open offcore", oc, file=sys.stderr) try: uc = os.getenv("UNCORE") if uc: uc = canon_emapvar(uc, "uncore") uc = event_download.eventlist_name(uc, "uncore") emap.add_uncore(uc) elif not eventmap_is_file: uc = event_download.eventlist_name(el, "uncore") if os.path.exists(uc) and uc != el: emap.add_uncore(uc) if experimental: uc = event_download.eventlist_name(el, "uncore experimental") if os.path.exists(uc) and uc != el: emap.add_uncore(uc) except IOError: print("Cannot open uncore", uc, file=sys.stderr) def read_map(env, typ, r): try: e2 = os.getenv(env) if e2: e2 = canon_emapvar(e2, typ) r(e2) # don't try to download for now except IOError: print("Cannot open " + env, e2, file=sys.stderr) read_map("EVENTMAP2", "core", emap.read_events) read_map("EVENTMAP3", "core", emap.read_events) read_map("UNCORE2", "uncore", emap.add_uncore) def canon_emapvar(el, typ): if ("*" in el or "." in el or "_" in el) and "/" not in el and not file_exists(el): el = "%s/%s" % (event_download.getdir(), el) if '*' in el: l = glob.glob(el) if l: if len(l) > 1: l = [x for x in l if x.find(typ) >= 0] if l: el = l[0] return el def find_emap(eventvar="EVENTMAP", pmu="cpu"): """Search and read a perfmon event map. When the EVENTMAP environment variable is set read that, otherwise read the map for the current CPU. EVENTMAP can be a CPU specifier in the map file or a path name. Dito for the OFFCORE and UNCORE environment variables. 
Optionally pass the name of the EVENTMAP variable, and the cpu pmu name. Return an emap object that contains the events and can be queried or None if nothing is found or the current CPU is unknown.""" el = os.getenv(eventvar) if not el: eventmap_is_file = False el = event_download.get_cpustr() else: eventmap_is_file = "/" in el el = canon_emapvar(el, "core") if "/" in el: try: emap = EmapNativeJSON(el, pmu) if not emap or emap.error: return None add_extra_env(emap, el, eventmap_is_file) return emap except IOError: return None try: if not force_download: return json_with_extra(el, eventmap_is_file, pmu) except IOError: pass try: toget = ["core"] if not os.getenv("OFFCORE"): toget.append("offcore") if not os.getenv("UNCORE"): toget.append("uncore") if experimental: toget += [x + " experimental" for x in toget] event_download.download(el, toget) return json_with_extra(el, eventmap_is_file, pmu) except IOError: pass return None def process_events(event, print_only, period, noexplode): overflow = None # replace inner commas so we can split events event = re.sub(r"([a-z][a-z0-9]+/)([^/]+)/", lambda m: m.group(1) + m.group(2).replace(",", "#") + "/", event) el = event.split(",") nl = [] group_index = 0 for i in el: group_start = "" group_end = "" start = "" end = "" if i.startswith('{'): group_start = "{" i = i[1:] group_index = len(nl) m = re.match(r'(.*)(\}(:.*)?)', i) if m: group_end = m.group(2) i = m.group(1) i = i.strip() m = re.match(r'([^/]+)/([^#]+)(#?.*?)/(.*)', i) if m: start = m.group(1) + "/" for emap in emap_list: if emap.pmu == m.group(1): ev = emap.getevent(m.group(2)) break else: ev = emap_list[0].getevent(m.group(2)) end = m.group(3) + "/" if ev: qual = "".join(sorted(merge_extra(extra_set(ev.extra), extra_set(m.group(4))))) end += qual i = ev.output_newstyle(period=period, noexplode=noexplode) if i.count("/") > 2: # was it exploded? 
start = "" end = "" else: start = "" end = "" else: ev = None res = [x.getevent(i) for x in emap_list] res = [x for x in res if x] if res: if len(res) > 1: print("Event %s is not unique on hybrid CPUs. Add cpu_*// prefixes" % i, file=sys.stderr) ev = res[0] i = ev.output(period=period, noexplode=noexplode) if ev: if ev.msr: msr.checked_writemsr(ev.msr, ev.msrval, print_only) for emap in emap_list: if emap.latego and (ev.val & 0xffff) in latego.latego_events: latego.setup_event(ev.val & 0xffff, 1) overflow = ev.overflow event = (group_start + start + i + end + group_end).replace("#", ",") nl.append(event) if ev: emap.update_event(event, ev) if "S" in group_end: for j in range(group_index + 1, len(nl)): nl[j] = re.sub(r',period=\d+', '', nl[j]) return str.join(',', nl), overflow def getarg(i, cmd): if sys.argv[i][2:] == '' or sys.argv[i][:2] == '--': cmd.append(sys.argv[i]) i += 1 arg = "" if len(sys.argv) > i: arg = sys.argv[i] prefix = "" else: arg = sys.argv[i][2:] prefix = sys.argv[i][:2] return arg, i, prefix def process_args(): perf = os.getenv("PERF") if not perf: perf = "perf" cmd = [perf] noexplode = False overflow = None print_only = False never, no, yes = range(3) record = no i = 1 while i < len(sys.argv): if sys.argv[i] == "--print": print_only = True elif sys.argv[i] == "--force-download": pass elif sys.argv[i] == "--experimental": pass elif sys.argv[i] == "--no-period": record = never elif sys.argv[i] == "--noexplode": noexplode = True elif sys.argv[i] == "record" and record == no: cmd.append(sys.argv[i]) record = yes elif sys.argv[i][0:2] == '-e' or sys.argv[i] == '--event': event, i, prefix = getarg(i, cmd) event, overflow = process_events(event, print_only, record == yes, noexplode) cmd.append(prefix + event) elif record and (sys.argv[i][0:2] == '-c' or sys.argv[i] == '--count'): oarg, i, prefix = getarg(i, cmd) if oarg == "default": if overflow is None: print(""" Specify the -e events before -c default or event has no overflow field.""", 
file=sys.stderr) sys.exit(1) cmd.append(prefix + overflow) else: cmd.append(prefix + oarg) else: cmd.append(sys.argv[i]) i += 1 print(" ".join(map(quote, cmd))) if print_only: sys.exit(0) return cmd if sys.version_info.major == 3: popentext = dict(text=True) else: popentext = {} def get_pager(): f = sys.stdout if f.isatty(): try: sp = subprocess.Popen(["less", "-F"], stdin=subprocess.PIPE, **popentext) # type: ignore return sp.stdin, sp except OSError: f = sys.stdout return f, None def perf_cmd(cmd): if len(emap_list) == 0: sys.exit(subprocess.call(cmd)) elif len(sys.argv) >= 2 and sys.argv[1] == "list": pager, proc = get_pager() try: l = subprocess.Popen(cmd, stdout=pager) l.wait() print(file=pager) uncore = True for emap in emap_list: if len(emap_list) > 1: print("%s:\n" % emap.pmu, file=pager) emap.dumpevents(pager, proc is not None, uncore) uncore = False if proc: pager.close() proc.wait() except IOError: pass elif len(sys.argv) >= 2 and (sys.argv[1] == "report" or sys.argv[1] == "stat"): direct = version.has_name if not direct: for w in sys.argv: if w == "--tui": direct = True break if direct: ret = subprocess.call(cmd) latego.cleanup() sys.exit(ret) try: pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout raw = lambda e: " " + emap.getraw(int(e.group(1), 16)) for i in pipe: i = re.sub("[rR]aw 0x([0-9a-f]{4,})", raw, i) i = re.sub("r([0-9a-f]{4,})", raw, i) i = re.sub("(cpu(_core|_atom)?/.*?/)", lambda e: emap.getperf(e.group(1)), i) print(i, end='') except IOError: pass pipe.close() latego.cleanup() else: sys.exit(subprocess.call(cmd)) def find_pmus(): g = glob.glob("/sys/devices/cpu*") if len(g) == 0: g = ["/sys/devices/cpu"] return [i.replace("/sys/devices/", "") for i in g] if __name__ == '__main__': for j in sys.argv: if j == "--force-download": force_download = True if j == "--experimental": experimental = True if j == "--noexplode": noexplode = True msr = MSR() pmus = find_pmus() for pmu in pmus: emap = 
find_emap(pmu=pmu) if not emap: print("Do not recognize CPU or cannot find CPU map file.", file=sys.stderr) else: emap_list.append(emap) cmd = process_args() try: perf_cmd(cmd) except KeyboardInterrupt: pass
41,470
Python
.py
1,122
27.148841
152
0.523352
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,932
tl_io.py
andikleen_pmu-tools/tl_io.py
# Copyright (c) 2020, Intel Corporation # Author: Andi Kleen # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. from __future__ import print_function import sys import subprocess import os import argparse if sys.version_info.major == 3: from typing import Set # noqa if sys.version_info.major == 3: popentext = dict(universal_newlines=True) else: popentext = {} def popen_stdout(cmd): return subprocess.Popen(cmd, stdout=subprocess.PIPE, **popentext) # type: ignore def popen_stdinout(cmd, f): return subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=f, **popentext) # type: ignore def flex_open_r(fn): if fn.endswith(".xz"): xz = popen_stdout(["xz", "-d", "--stdout", fn]) return xz.stdout if fn.endswith(".gz"): gzip = popen_stdout(["gzip", "-d", "-c", fn]) return gzip.stdout if fn.endswith(".zst"): return popen_stdout(["zstd", "-d", "--stdout", fn]) .stdout return open(fn, 'r') def flex_open_w(fn): f = open(fn, "w") if fn.endswith(".xz"): xz = popen_stdinout(["xz", "-z", "--stdout"], f) return xz.stdin if fn.endswith(".gz"): gzip = popen_stdinout(["gzip", "-c"], f) return gzip.stdin if fn.endswith(".zst"): return popen_stdinout(["zstd", "--stdout"], f).stdin return f tl_tester = os.getenv("TL_TESTER") test_mode = tl_tester and tl_tester != "0" args = argparse.Namespace() def set_args(a): global args args = a def warn_no_assert(msg): if not args.quiet: print("warning: " + msg, file=sys.stderr) def warn_test(msg): if test_mode: warn_no_assert(msg) def warn(msg): warn_no_assert(msg) if test_mode: assert 0, msg warned = set() # type: Set[str] def warn_once_no_assert(msg): if msg not in 
warned and not args.quiet: print("warning: " + msg, file=sys.stderr) warned.add(msg) def warn_once(msg): warn_once_no_assert(msg) if test_mode: assert 0, msg def print_once(msg): if msg not in warned and not args.quiet: print(msg) warned.add(msg) def inform(msg): if not args.quiet: print(msg) def debug_print(x): if args.debug: print(x, file=sys.stderr) def obj_debug_print(obj, x): if args.debug or (args.dfilter and obj.name in args.dfilter): print(x, file=sys.stderr) def test_debug_print(x): if args.debug or test_mode: print(x, file=sys.stderr)
2,853
Python
.py
88
27.931818
93
0.659505
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,933
tl_output.py
andikleen_pmu-tools/tl_output.py
# Copyright (c) 2012-2020, Intel Corporation # Author: Andi Kleen # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # Output toplev results in various formats from __future__ import print_function import locale import csv import re import sys import json import os from math import isnan from collections import defaultdict, Counter, OrderedDict from tl_uval import UVal, combine_uval from tl_io import flex_open_w if sys.version_info.major == 3: import typing # noqa from typing import DefaultDict, Dict, Any # noqa def output_name(name, typ): if typ: if "." in name: name = re.sub(r'(.*)\.', r'\1-%s.' % typ, name) else: name += "-" + typ return name def open_logfile(name, typ): if name is None or name == "": return sys.stderr if 'write' in name.__class__.__dict__: return name name = output_name(name, typ) try: return flex_open_w(name) except IOError: sys.exit("Cannot open logfile %s" % name) def open_all_logfiles(args, logfile): if args.split_output and args.per_thread + args.per_core + args.per_socket + args.global_ > 0: logfiles = dict() if args.per_thread: logfiles['thread'] = open_logfile(logfile, "thread") if args.per_core: logfiles['core'] = open_logfile(logfile, "core") if args.per_socket: logfiles['socket'] = open_logfile(logfile, "socket") if args.global_: logfiles['global'] = open_logfile(logfile, "global") return logfiles, None else: return None, open_logfile(logfile, None) BUFS = 1024*1024 def catrmfile(infn, outf, keep): with open(infn, "r") as inf: while True: buf = inf.read(BUFS) if len(buf) == 0: break outf.write(buf) outf.flush() if not keep: os.remove(infn) def 
catrmoutput(infn, logf, logfiles, keep): if logfiles: for j in logfiles.keys(): catrmfile(output_name(infn, j), logfiles[j], keep) else: catrmfile(infn, logf, keep) class Output(object): """Abstract base class for Output classes.""" def __init__(self, logfile, version, cpu, args): self.logfiles, self.logf = open_all_logfiles(args, args.output) self.printed_descs = set() self.hdrlen = 30 self.version = version self.unitlen = 12 self.belowlen = 0 self.version = "%s on %s [%s%s]" % (version, cpu.name, cpu.true_name, "/" + cpu.pmu_name if cpu.pmu_name else "") self.curname = "" self.curname_nologf = "" self.printedversion = set() self.no_header = args.no_csv_header self.no_footer = args.no_csv_footer self.abbrev = args.abbrev self.valcsv = None self.last_prefix = "" self.args = args def flushfiles(self): if self.logfiles: for j in self.logfiles.values(): j.flush() self.logf.flush() # pass all possible hdrs in advance to compute suitable padding def set_hdr(self, hdr, area): if area: hdr = "%-16s %s" % (area, hdr) self.hdrlen = max(len(hdr) + 1, self.hdrlen) def set_below(self, below): if below: self.belowlen = 1 def set_unit(self, unit): self.unitlen = max(len(unit), self.unitlen) def set_cpus(self, cpus): pass def item(self, area, name, uval, timestamp, unit, desc, title, sample, bn, below, idle): assert isinstance(uval, UVal) # -- if desc in self.printed_descs: desc = "" else: self.printed_descs.add(desc) if not area: area = "" self.show(timestamp, title, area, name, uval, unit, desc, sample, bn, below, idle) def ratio(self, area, name, uval, timestamp, unit, desc, title, sample, bn, below, idle): uval.is_ratio = True self.item(area, name, uval, timestamp, unit, desc, title, sample, bn, below, idle) def metric(self, area, name, uval, timestamp, desc, title, unit, idle): self.item(area, name, uval, timestamp, unit, desc, title, None, "", "", idle) def flush(self): pass def remark(self, m): if not self.logfiles: self.logf.write('\n%s:\n' % m) def reset(self, name): if 
self.logfiles: self.logf = self.logfiles[name] self.curname = name self.curname_nologf = name def show(self, timestamp, title, area, hdr, val, unit, desc, sample, bn, below, idle): pass def print_version(self): if self.no_header or self.args.no_version: return if self.curname not in self.printedversion: if self.logfiles: self.logfiles[self.curname].write("# " + self.version + "\n") else: self.logf.write("# " + self.version + "\n") self.printedversion.add(self.curname) print_header = print_version def print_footer(self): pass def print_footer_all(self): if self.no_footer or self.args.no_version: return if self.logfiles: for f in self.logfiles.values(): f.write("# %s\n" % self.version) else: self.logf.write("# " + self.version + "\n") def fmt_below(below): if below: return "<" return "" def short_hdr(hdr, last): n = os.path.commonprefix((hdr, last)) if "." not in n: return hdr n = n[:n.rfind(".")] return "..." + hdr[len(n)+1:] class OutputHuman(Output): """Generate human readable single-column output.""" def __init__(self, logfile, args, version, cpu): Output.__init__(self, logfile, version, cpu, args) try: locale.setlocale(locale.LC_ALL, '') except locale.Error: pass self.args = args self.titlelen = 7 def set_cpus(self, cpus): if len(cpus) > 0: self.titlelen = max(map(len, cpus)) + 1 def print_desc(self, desc, sample): if self.args.no_desc: return if desc: print("\t" + desc, file=self.logf) if sample: print("\t" + "Sampling events: ", sample, file=self.logf) def print_timestamp(self, timestamp): if timestamp: if isnan(timestamp): self.logf.write("%-11s " % "SUMMARY") else: self.logf.write("%6.9f " % timestamp) def print_line_header(self, area, ohdr): if "Info" in area or not self.abbrev: hdr = ohdr else: hdr = short_hdr(ohdr, self.last_prefix) self.last_prefix = ohdr if area: hdr = "%-16s %s" % (area, hdr) self.logf.write("%-*s " % (self.hdrlen, hdr)) # timestamp Timestamp in interval mode # title CPU # area FE/BE ... 
# hdr Node Name # val Formatted measured value # unit unit # desc Object description # sample Sample Objects (string) # vs Statistics object # bn marker for bottleneck # below True if below # idle Idle marker (ignored for Human) # Example: # C0 BE Backend_Bound: 62.00 % def show(self, timestamp, title, area, hdr, val, unit, desc, sample, bn, below, idle): self.print_header() self.print_timestamp(timestamp) write = self.logf.write if title: write("%-*s" % (self.titlelen, title)) self.print_line_header(area, hdr) vals = "{:<{unitlen}} {:>20} {:<{belowlen}}".format( (" " if unit and unit[0] != "%" else "") + unit, val.format_value(unit), fmt_below(below), unitlen=self.unitlen + 2, belowlen=self.belowlen) if not self.args.no_mux and val.multiplex != 100.0: vals += " " + val.format_mux() if val.stddev: vals += " +- {:>8}".format(val.format_uncertainty()) if bn: vals += bn write(vals + "\n") self.print_desc(desc, sample) def metric(self, area, name, uval, timestamp, desc, title, unit, idle): self.item(area, name, uval, timestamp, unit, desc, title, None, "", "", False) def convert_ts(ts): if isnan(ts): return "SUMMARY" return ts class OutputColumns(OutputHuman): """Human-readable output data in per-cpu columns.""" def __init__(self, logfile, args, version, cpu): OutputHuman.__init__(self, logfile, args, version, cpu) self.nodes = {} self.timestamp = None self.cpunames = set() self.printed_header = False def set_cpus(self, cpus): self.cpunames = cpus def show(self, timestamp, title, area, hdr, val, unit, desc, sample, bn, below, idle): if self.args.single_thread: OutputHuman.show(self, timestamp, title, area, hdr, val, unit, desc, sample, bn, below, idle) return self.print_header() self.timestamp = timestamp key = (area, hdr) if key not in self.nodes: self.nodes[key] = {} assert title not in self.nodes[key] self.nodes[key][title] = (val, unit, desc, sample, bn, below, idle) def flush(self): VALCOL_LEN = 16 write = self.logf.write cpunames = sorted(self.cpunames) if not 
self.printed_header: if self.timestamp: write("%9s" % "") self.print_line_header("", "") for j in cpunames: write("%*s " % (VALCOL_LEN, j)) write("\n") self.printed_header = True for key in sorted(sorted(self.nodes.keys(), key=lambda x: x[1]), key=lambda x: x[0] == ""): node = self.nodes[key] desc = None sample = None unit = None if self.timestamp: self.print_timestamp(self.timestamp) self.print_line_header(key[0], key[1]) vlist = [] for cpuname in cpunames: if cpuname in node: cpu = node[cpuname] uval, unit, desc, sample, bn, below, idle = cpu v = uval.format_value(unit) vlist.append(uval) write("%*s%s " % (VALCOL_LEN, v, "?" if below else "*" if bn else " ")) else: write("%*s " % (VALCOL_LEN, "")) if unit: # XXX should move this to be per entry? cval = combine_uval(vlist) vs = (" +- " + cval.format_uncertainty() + " " + cval.format_mux()) if cval.stddev else "" write(" %-*s%s" % (self.unitlen, (" " if unit[0] != "%" else "") + unit, vs)) write("\n") self.print_desc(desc, sample) self.nodes = {} def reset(self, name): Output.reset(self, name) self.printed_header = False class OutputColumnsCSV(OutputColumns): """Columns output in CSV mode.""" def __init__(self, logfile, sep, args, version, cpu): OutputColumns.__init__(self, logfile, args, version, cpu) self.writer = {} if self.logfiles: for n, f in self.logfiles.items(): self.writer[n] = csv.writer(f, delimiter=sep, lineterminator='\n') else: self.writer[''] = csv.writer(self.logf, delimiter=sep, lineterminator='\n') self.printed_header = False # XXX implement bn and idle def show(self, timestamp, title, area, hdr, val, unit, desc, sample, bn, below, idle): self.print_header() self.timestamp = timestamp key = (area, hdr) if key not in self.nodes: self.nodes[key] = {} assert title not in self.nodes[key] self.nodes[key][title] = (val, unit + " " + fmt_below(below), desc, sample) def flush(self): cpunames = sorted(self.cpunames) if not self.printed_header and not self.no_header: ts = ["Timestamp"] if self.timestamp 
else [] header = ts + ["Area", "Node"] + cpunames + ["Description", "Sample", "Stddev", "Multiplex"] self.writer[self.curname].writerow([x for x in header]) self.printed_header = True for key in sorted(sorted(self.nodes.keys(), key=lambda x: x[1]), key=lambda x: x[0] == ""): node = self.nodes[key] ts = [convert_ts(self.timestamp)] if self.timestamp else [] l = ts + [key[0], key[1]] vlist = [] ol = {} desc, sample = "", "" for cpuname in cpunames: if cpuname in node: cpu = node[cpuname] if cpu[2]: desc = cpu[2] desc = re.sub(r"\s+", " ", desc) if cpu[3]: sample = cpu[3] # ignore unit for now vlist.append(cpu[0]) ol[cpuname] = float(cpu[0].value) if cpu[0].value else "" else: vlist.append(UVal("",0)) l += [ol[x] if x in ol else "" for x in cpunames] l.append(desc) l.append(sample) vs = combine_uval(vlist) if vs: l += (vs.format_uncertainty().strip(), vs.format_mux().strip()) else: l += ["", ""] self.writer[self.curname].writerow(l) self.nodes = {} print_footer = Output.print_footer_all class OutputCSV(Output): """Output data in CSV format.""" def __init__(self, logfile, sep, args, version, cpu): Output.__init__(self, logfile, version, cpu, args) self.writer = {} if self.logfiles: for n, f in self.logfiles.items(): self.writer[n] = csv.writer(f, delimiter=sep, lineterminator='\n') else: self.writer[''] = csv.writer(self.logf, delimiter=sep, lineterminator='\n') self.args = args self.printed_headers = set() def print_header_csv(self, timestamp, title): if self.no_header: return if self.curname_nologf not in self.printed_headers: l = [] if timestamp: l.append("Timestamp") if title: l.append("CPUs") self.writer[self.curname].writerow(l + ['Area', 'Value', 'Unit', 'Description', 'Sample', 'Stddev', 'Multiplex', 'Bottleneck', 'Idle']) self.printed_headers.add(self.curname_nologf) def show(self, timestamp, title, area, hdr, val, unit, desc, sample, bn, below, idle): self.print_header_csv(timestamp, title) if self.args.no_desc: desc = "" desc = re.sub(r"\s+", " ", desc) l = 
[] if timestamp: l.append(convert_ts(timestamp)) if title: l.append("CPU" + title if re.match(r'[0-9]+', title) else title) stddev = val.format_uncertainty().strip() multiplex = val.multiplex if not isnan(val.multiplex) else "" self.writer[self.curname].writerow(l + [hdr, val.format_value_raw().strip(), (unit + " " + fmt_below(below)).strip(), desc, sample, stddev, multiplex, bn, "Y" if idle else ""]) print_footer = Output.print_footer_all class OutputJSON(Output): """Output data in chrome / trace-viewer JSON format.""" def __init__(self, logfile, sep, args, version, cpu): Output.__init__(self, logfile, version, cpu, args) self.nodes = defaultdict(dict) # type: DefaultDict[str, Dict[str, Any]] self.headers = OrderedDict() self.count = Counter() # type: typing.Counter[str] self.no_header = args.no_json_header self.no_footer = args.no_json_footer self.num = 0 def print_footer_all(self): def write_all(s): if self.logfiles: for n in self.logfiles: self.logfiles[n].write(s(n)) else: self.logf.write(s("")) if self.no_footer: if self.num > 0: write_all(lambda x: ",\n") else: def start(name): n = "" if name not in self.count: n += "[\n" return n + "\n]\n" write_all(start) print_footer = print_footer_all def show(self, timestamp, title, area, hdr, val, unit, desc, sample, bn, below, idle): self.timestamp = timestamp self.nodes[title][hdr] = val self.headers[hdr] = True self.num += 1 def flush(self): nodes = OrderedDict() # type: OrderedDict[str,Any] for hdr in self.headers: for title in sorted(self.nodes.keys()): if hdr not in self.nodes[title]: continue nd = self.nodes[title] val = nd[hdr].value if isinstance(nd[hdr], UVal) else nd[hdr] if title: title += " " if hdr in ("Frontend_Bound", "Backend_Bound", "BadSpeculation", "Retiring"): # XXX key = title + "Level1" if key not in nodes: nodes[key] = {} nodes[title + "Level1"][hdr] = val elif hdr.count(".") >= 1: dot = hdr.rindex(".") nodes[title + hdr[:dot]] = { hdr: round(val, 2) } else: # assume it's metric nodes[title + 
hdr] = {hdr: val} for name in nodes.keys(): if self.count[self.curname] == 0: if not self.no_header: self.logf.write("[\n") else: self.logf.write(",\n") json.dump({"name": name, "ph": "C", "pid": 0, "ts": self.timestamp / 1e6 if self.timestamp and not isnan(self.timestamp) else 0, "args": nodes[name]}, self.logf) self.count[self.curname] += 1 self.nodes = defaultdict(dict) self.headers = OrderedDict() def remark(self, m): pass
18,928
Python
.py
475
29.454737
106
0.540833
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,934
tl_uval.py
andikleen_pmu-tools/tl_uval.py
# Copyright (c) 2018 Technical University of Munich # Author: Martin Becker # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # import math import logging import operator from tl_io import warn log = logging.getLogger(__name__) TEMPVAL = 'anon' div_op = operator.div if 'div' in operator.__dict__ else None # type: ignore def combine_uval(ulist): """ Combine multiple measurements of the same event into one measurement. Uses weighted average. """ combined = None if ulist is not None: combined = ulist[0] for oth in ulist[1:]: combined.update(oth) return combined class UVal: """ Measurement value annotated with uncertainty. Supports binary operators for error propagation. """ def __init__(self, name, value, stddev=0., samples=1, mux=100., comment="", computed=False): self.name = name self.comment = comment self.value = value self.stddev = stddev self.samples = samples self.computed = computed self.is_ratio = False self.multiplex = mux def __repr__(self): return "{} [{} +- {}]*{}".format(self.name, self.value, self.stddev, self.samples) def format_value(self, unit): if self.value is None: return "" if self.is_ratio: return "{:>16.1f} ".format(self.value * 100.) elif unit == "Count" or unit == "Clocks": return "{:16,.0f} ".format(self.value) elif self.value > 1000: return "{:16,.1f} ".format(self.value) else: return "{:16.2f}".format(self.value) def format_value_raw(self): if self.value is None: return "" if self.is_ratio: return "{:>13.1f}".format(self.value * 100.) 
else: return "{:13.1f}".format(self.value) def format_uncertainty(self): """string representation of measurement uncertainty""" vs = "" if self.stddev is not None: if self.is_ratio: if self.value != 0.: v = self.stddev * 100. else: v = 0. vs += "{:.1f}".format(v) else: vs += "{:6,.1f}".format(self.stddev) return vs def format_mux(self): vs = "" if self.multiplex and self.multiplex == self.multiplex: vs = "[{:4.1f}%]".format(self.multiplex) return vs @staticmethod def _merge_mux(lhs, rhs): return min(lhs.multiplex, rhs.multiplex) def update(self, other): """merge data from other event into this""" assert isinstance(other, UVal), "wrong type" # -- # calc weighted average n = self.samples + other.samples res = (1. / n) * (self.samples * self + other.samples * other) # apply 'res' to this self.samples = n self.value = res.value self.stddev = res.stddev self.multiplex = UVal._merge_mux(self, other) ###################### # operators ###################### def ensure_uval(binop): # type: ignore """decorator to ensure binary operators are both UVals""" def wrapper(self, v): # type: ignore if isinstance(v, UVal): return binop(self, v) # type: ignore elif isinstance(v, (float, int)): return binop(self, UVal(TEMPVAL, value=v, stddev=0)) # type: ignore else: return NotImplemented return wrapper @ensure_uval # type: ignore def __sub__(self, other): return UVal._calc(operator.sub, self, other) @ensure_uval # type: ignore def __add__(self, other): return UVal._calc(operator.add, self, other) @ensure_uval # type: ignore def __mul__(self, other): return UVal._calc(operator.mul, self, other) @ensure_uval # type: ignore def __div__(self, other): return UVal._calc(operator.div, self, other) # type: ignore @ensure_uval # type: ignore def __truediv__(self, other): return UVal._calc(operator.truediv, self, other) @ensure_uval # type: ignore def __lt__(self, other): return self.value < other.value @ensure_uval # type: ignore def __le__(self, other): return self.value <= other.value 
@ensure_uval # type: ignore def __eq__(self, other): return self.value == other.value @ensure_uval # type: ignore def __ne__(self, other): return not self.__eq__(other) @ensure_uval # type: ignore def __ge__(self, other): return self.value >= other.value @ensure_uval # type: ignore def __gt__(self, other): return self.value > other.value @ensure_uval # type: ignore def __rsub__(self, other): """other - self""" return UVal._calc(operator.sub, other, self) @ensure_uval # type: ignore def __radd__(self, other): """other + self""" return UVal._calc(operator.add, other, self) @ensure_uval # type: ignore def __rmul__(self, other): """other * self""" return UVal._calc(operator.mul, other, self) @ensure_uval # type: ignore def __rdiv__(self, other): """other / self""" return UVal._calc(operator.div, other, self) # type: ignore @ensure_uval # type: ignore def __rtruediv__(self, other): """other / self""" return UVal._calc(operator.truediv, other, self) def __nonzero__(self): # python 2 return self.value != 0.0 def __bool__(self): # python 3 return self.value != 0.0 ######################### # uncertainty propagator ######################### @staticmethod def _calc(op, lhs, rhs, cov=0.): """Compute the result of 'lhs [op] rhs' and propagate standard deviations""" A = lhs.value B = rhs.value a = lhs.stddev b = rhs.stddev # new value f = op(float(A), B) if isinstance(f, float) and f.is_integer(): f = int(f) # uncertainty if op in (operator.mul, operator.truediv, div_op): sgn = 1 if op == operator.mul else -1 if A != 0 and B != 0: u = abs(f) * math.sqrt(pow(float(a)/A, 2) + pow(float(b)/B, 2) + sgn*2.*cov/(A*B)) elif op == operator.mul: u = 0. elif op == div_op or op == operator.truediv: u = 0. 
if A != 0: warn("Error prop failed because of DIV/0: {} {} {}".format(lhs, op, rhs)) elif op in (operator.add, operator.sub): sgn = 1 if op == operator.add else -1 u = math.sqrt(pow(a, 2) + pow(b, 2) + sgn*2.*cov) else: u = None log.error("Unsupported operation for uncertainty propagator in {} {} {}".format(lhs, op, rhs)) # -- ret = UVal(TEMPVAL, value=f, stddev=u, mux=UVal._merge_mux(lhs, rhs), computed=True) log.debug("{} {} {} => {}", lhs, op, rhs, ret) return ret
7,505
Python
.py
201
29.129353
106
0.566396
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,935
list-events.py
andikleen_pmu-tools/list-events.py
#!/usr/bin/env python3 # print all events in a eventmap from __future__ import print_function import sys import ocperf emap = ocperf.find_emap() if not emap: sys.exit("Unknown CPU or cannot find event table") for j in sorted(emap.events): print(j)
256
Python
.py
10
23.8
54
0.752033
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,936
linux_metrics.py
andikleen_pmu-tools/linux_metrics.py
# # basic linux software metrics # # for most of these it would be much nicer to measure durations, but # perf stat doesn't support that # from __future__ import print_function import os, sys class CS: name = "Context switches" desc = " Number of context switches between threads or processes on a CPU." nogroup = True subplot = "Scheduler" def compute(self, EV): self.val = EV("cs", 1) self.thresh = self.val > 0 class MinorFaults: name = "Minor faults" desc = " Page faults not leading to disk IO, such as allocation of memory." nogroup = True subplot = "Faults" def compute(self, EV): self.val = EV("minor-faults", 1) self.thresh = self.val > 0 class MajorFaults: name = "Major faults" desc = """ Page faults leading to disk IO, such as loading executable text or do mmap'ed IO.""" nogroup = True subplot = "Faults" def compute(self, EV): self.val = EV("major-faults", 1) self.thresh = self.val > 0 class Migrations: name = "Migrations" desc = " Number of thread/process migrations to another CPU." nogroup = True subplot = "Scheduler" def compute(self, EV): self.val = EV("migrations", 1) self.thresh = self.val > 0 # The events below require trace points, so typically root. class BadEV(Exception): pass def C(ev): if not os.path.exists("/sys/kernel/debug/tracing/events" + ev.replace(":", "/")): raise BadEV() return ev class Syscalls: name = "Syscalls" desc = " Number of syscalls, not including vsyscalls such as gettimeofday." nogroup = True subplot = "OS metrics" def compute(self, EV): try: self.val = EV(C("raw_syscalls:sys_enter"), 1) except BadEV: self.val = 0 self.thresh = self.val > 0 class Interrupts: name = "Interrupts" desc = """ Number of interrupts, including NMIs, excluding exceptions. These are interrupts caused by hardware, typically to indicate IO. 
This includes performance counter sampling interrupts.""" nogroup = True subplot = "Interrupts" # can overcount with shared vectors def compute(self, EV): try: self.val = EV(C("irq:irq_handler_entry"), 1) + EV(C("nmi:nmi_handler"), 1) except BadEV: self.val = 0 self.thresh = self.val > 0 # XXX on older kernels will not count TLB flushes, when they still had an # own vector class IPIs: name = "IPIs" desc = """ Number of inter-processor-interrupts (IPIs). These are caused by software, for example to flush TLBs, finish IOs on the originating CPU, flush per CPU software caches (such as slab allocator caches) or force reschedules.""" nogroup = True subplot = "Interrupts" # can overcount with shared vectors def compute(self, EV): try: self.val = (EV(C("irq_vectors:call_function_entry"), 1) + EV(C("irq_vectors:call_function_single_entry"), 1) + EV(C("irq_vectors:reschedule_entry"), 1)) except BadEV: self.val = 0 self.thresh = self.val > 0 class Workqueues: name = "Workqueues" desc = " Work queue item executions. These are tasks executed by the kernel in the background." nogroup = True subplot = "Interrupts" def compute(self, EV): try: self.val = EV(C("workqueue:workqueue_execute_start"), 1) except BadEV: self.val = 0 self.thresh = self.val > 0 class BlockIOs: name = "BlockIOs" desc = " Block IOs issued. This counts the number of block IO requests inserted into a queue." nogroup = True subplot = "IO" def compute(self, EV): try: self.val = EV(C("block:block_rq_insert"), 1) except BadEV: self.val = 0 self.thresh = self.val > 0 class NetworkTX: name = "NetworkTX" desc = " Network packets send to a network device. Aggregated (TSO/GRO) packets are counted as single packets." nogroup = True subplot = "IO" def compute(self, EV): try: self.val = EV(C("net:net_dev_start_xmit"), 1) except BadEV: self.val = 0 self.thresh = self.val > 0 class NetworkRX: name = "NetworkRX" desc = " Network packets received from a network device. Aggregated (GRO) packets are counted as single packets." 
nogroup = True subplot = "IO" def compute(self, EV): try: self.val = (EV(C("net:netif_rx"), 1) + EV(C("net:netif_receive_skb"), 1) + EV(C("net:napi_gro_receive_entry"), 1) + EV(C("net:napi_gro_frags_entry"), 1)) except BadEV: self.val = 0 self.thresh = self.val > 0 # trace events # sched stats? (non perf) # yield percentage # % sched to idle # ttw local vs remote # wait for cpu time # sched wakeup # sched iowait # filemap add to page cache # compaction begin # page_alloc # start reclaim # XXX large page alloc # intel_gpu_freq_change # XXX gpu # kvm:kvm_exit warned = False class Setup: def __init__(self, r): r.force_metric(CS()) r.force_metric(MinorFaults()) r.force_metric(MajorFaults()) r.force_metric(Migrations()) global warned if os.path.exists("/sys/kernel/debug/tracing/events"): r.force_metric(Syscalls()) r.force_metric(Interrupts()) r.force_metric(IPIs()) r.force_metric(Workqueues()) r.force_metric(BlockIOs()) r.force_metric(NetworkTX()) r.force_metric(NetworkRX()) elif sys.argv[0].find("toplev") >= 0 and "--import" not in sys.argv and not warned: warned = True print("Need to be root for trace point Linux software metrics.", file=sys.stderr)
5,903
Python
.py
174
27.448276
117
0.622724
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,937
knl_ratios.py
andikleen_pmu-tools/knl_ratios.py
import metrics import node import slm_ratios as slm version = "1.0" slm.set_clks_event_name("CPU_CLK_UNHALTED.THREAD") smt_enabled = False class CyclesPerUop(slm.CyclesPerUop): pass # LEVEL 1 class FrontendBound(slm.FrontendBound): pass class BackendBound(slm.BackendBound): pass class BadSpeculation(slm.BadSpeculation): pass class Retiring(slm.Retiring): pass # LEVEL 2 class FrontendLatency(slm.FrontendLatency): pass # LEVEL 3 class ICacheMisses(slm.ICacheMisses): # Override _compute(), since KNL does not have # the DECODE_RESTRICTION.PDCACHE_WRONG event def _compute(self, ev): return slm.icache_line_fetch_cost(ev, self.level) class ITLBMisses(slm.ITLBMisses): pass class MSSwitches(slm.MSSwitches): pass class Setup(object): def __init__(self, runner): # Instantiate nodes as required to be able to specify their # references # L3 objects icache_misses = ICacheMisses() itlb_misses = ITLBMisses() ms_cost = MSSwitches() #L1 objects frontend = FrontendBound() bad_speculation = BadSpeculation() retiring = Retiring() backend = BackendBound(retiring=retiring, bad_speculation=bad_speculation, frontend=frontend) # L2 objects frontend_latency = FrontendLatency(icache_misses=icache_misses, itlb=itlb_misses, ms_cost=ms_cost, frontend=frontend ) # Set parents node.set_parent(None, [frontend, bad_speculation, retiring, backend]) node.set_parent(frontend, [frontend_latency]) node.set_parent(frontend_latency, [icache_misses, itlb_misses, ms_cost]) # User visible metrics user_metrics = [slm.Metric_IPC(), slm.Metric_CPI(), slm.Metric_TurboUtilization(), slm.Metric_CLKS(), slm.Metric_Time(), slm.CyclesPerUop()] nodes = [obj for obj in locals().values() if issubclass(obj.__class__, metrics.MetricBase) and obj.level > 0] nodes = sorted(nodes, key=lambda n: n.level) # Pass to runner list(map(runner.run, nodes)) list(map(runner.metric, user_metrics))
2,492
Python
.py
68
26.323529
77
0.595754
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,938
fake-perf.py
andikleen_pmu-tools/fake-perf.py
#!/usr/bin/env python3 # mock perf for limited test environments from __future__ import print_function import sys out = sys.stderr av = sys.argv if av[-1] == "true": sys.exit(0) j = 1 process = True while j < len(sys.argv): if av[j] == "--version": print("perf version 5.6.8", end='') break elif av[j] == "-o" and process: j += 1 out = open(av[j], "w") elif av[j] == "--": process = False j += 1 out.write("\n")
473
Python
.py
21
18.619048
43
0.56541
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,939
pfeat.py
andikleen_pmu-tools/parser/pfeat.py
# print perf headers def print_feat(feat): print("# Measured on %s (%s)" % ( feat.hostname.hostname, feat.osrelease.osrelease)) print("# %s, %s" % ( feat.cpudesc.cpudesc, feat.cpuid.cpuid)) print("# %s" % (" ".join(map(lambda x: x.cmdline, feat.cmdline.cmdline))))
326
Python
.py
9
28.444444
78
0.556962
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,940
elf.py
andikleen_pmu-tools/parser/elf.py
#!/usr/bin/env python # resolve ELF and DWARF symbol tables using elftools # # Copyright (c) 2013-2014, Intel Corporation # Author: Andi Kleen # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. from __future__ import print_function from elftools.common.py3compat import bytes2str from elftools.elf.elffile import ELFFile from elftools.elf.sections import SymbolTableSection import elftools.common.exceptions import util import kernel # global caches open_files = dict() resolved = dict() symtables = dict() lines = dict() def build_line_table(dwarfinfo): lines = [] for CU in dwarfinfo.iter_CUs(): lp = dwarfinfo.line_program_for_CU(CU) prevstate = None for entry in lp.get_entries(): if entry.state is None or entry.state.end_sequence: continue if prevstate: lines.append((prevstate.address, entry.state.address, lp['file_entry'][prevstate.file - 1].name, prevstate.line)) prevstate = entry.state lines.sort() return lines def build_symtab(elffile): syms = [] for section in elffile.iter_sections(): if isinstance(section, SymbolTableSection): for nsym, sym in enumerate(section.iter_symbols()): name = bytes2str(sym.name) if not name: continue if sym.entry.st_info.type != 'STT_FUNC': continue end = sym['st_value'] + sym['st_size'] syms.append((sym['st_value'], end, bytes2str(sym.name))) syms.sort() return syms reported = set() def find_elf_file(fn): if fn.startswith("//"): return None if fn in open_files: elffile = open_files[fn] else: try: f = open(fn, 'rb') elffile = ELFFile(f) open_files[fn] = elffile except (IOError, elftools.common.exceptions.ELFError): if fn not in reported: print("Cannot 
open", fn) reported.add(fn) return None return elffile def resolve_line(fn, ip): elffile = find_elf_file(fn) if elffile is None: return "?" if fn not in lines and elffile.has_dwarf_info(): lines[fn] = build_line_table(elffile.get_dwarf_info()) src = None if resolve_line and fn in lines: pos = util.find_le(lines[fn], ip) if pos: src = "%s:%d" % (pos[2], pos[3]) return src # global one hit cache # helps a lot for LBR decoding # tbd use a small list with LRU? last_sym = None def resolve_sym(fn, ip): elffile = find_elf_file(fn) if elffile is None: return "?", 0 global last_sym try: if fn not in symtables: symtables[fn] = build_symtab(elffile) if last_sym and last_sym[0] <= ip <= last_sym[1]: return last_sym[2], ip - last_sym[0] loc = None offset = None if fn in symtables: sym = util.find_le(symtables[fn], ip) if sym: loc, offset = sym[2], ip - sym[0] except elftools.common.exceptions.ELFError: return "?", 0 return loc, offset def resolve_ip(filename, foffset, ip, need_line): sym, soffset, line = None, 0, None if filename and filename.startswith("/"): sym, soffset = resolve_sym(filename, foffset) if not sym: sym, soffset = resolve_sym(filename, ip) if need_line: line = resolve_line(filename, ip) else: sym, soffset = kernel.resolve_kernel(ip) return sym, soffset, line if __name__ == '__main__': import sys print(resolve_ip(sys.argv[1], int(sys.argv[2], 16))) print(resolve_line(sys.argv[1], int(sys.argv[2], 16)))
4,257
Python
.py
124
26.629032
75
0.607967
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,941
util.py
andikleen_pmu-tools/parser/util.py
# utility functions import bisect def find_le(f, key): pos = bisect.bisect_left(f, (key,)) if pos < len(f) and f[pos][0] == key: return f[pos] if pos == 0: return None return f[pos - 1]
220
Python
.py
9
19.666667
41
0.578947
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,942
perfpd.py
andikleen_pmu-tools/parser/perfpd.py
#!/usr/bin/env python # Import perf.data into a pandas DataFrame # # Copyright (c) 2013-2014, Intel Corporation # Author: Andi Kleen # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. from __future__ import print_function import pandas as pd import numpy as np import perfdata from collections import defaultdict, Counter import elf import mmap ignored = {'type', 'start', 'end', '__recursion_lock__', 'ext_reserved', 'header_end', 'end_event', 'offset', 'callchain', 'branch', 'branch_stack', 'end_id', 'size', 'cpumode', 'caller', 'time', # skip attr for now, as it is too complex # XXX simple representation 'attr'} bool_fields = {'kernel', 'hv', 'guest'} def resolve_list(j, ip, mm, need_line): filename, _, foffset = mm.resolve(j.pid, ip) sym, soffset, line = elf.resolve_ip(filename, foffset, ip, need_line) return [filename, sym, soffset, line] def resolve_chain(cc, j, mm, need_line): if not cc: return [] res = [] for ip in cc.caller: r = [ip,] r += resolve_list(j, ip, mm, need_line) res.append(r) return res def resolve_branch(branch, j, mm, need_line): res = [] for br in branch: # XXX flags r = [br['from'], br['to']] r += resolve_list(j, br['from'], mm, need_line) r += resolve_list(j, br['to'], mm, need_line) res.append(r) return res class Path: """Store either a callchain or a branch stack as a list with id.""" def __init__(self, val, id): self.val = val self.id = id class Aux: """Store auxilliary data to the main pandas perf array, like call chains or branch stacks. 
The data is deduped and a unique id generated.""" def __init__(self): self.ids = dict() self.paths = dict() self.next_id = 0 def alloc_id(self): id = self.next_id self.next_id += 1 return id def add(self, h, create): h = tuple(h) if h in self.paths: return self.paths[h].id id = self.alloc_id() path = Path(create(), id) self.paths[h] = path self.ids[id] = path return id def getid(self, id): return self.ids[id] def __getitem__(self, id): return self.ids[id] cpumodes = { 'UNKNOWN': (0, 0, 0), 'KERNEL': (1, 0, 0), 'USER': (0, 0, 0), 'HYPERVISOR': (0, 1, 0), 'GUEST_KERNEL': (1, 0, 1), 'GUEST_USER': (0, 0, 1), } def samples_to_df(h, need_line): """Convert a parsed perf event list to a pandas table. The pandas table contains all events in a easy to process format. The pandas table has callchain_aux and branch_aux fields pointing to Aux object defining the callchains/branches.""" ev = perfdata.get_events(h) index = [] data = defaultdict(list) callchains = Aux() branches = Aux() used = Counter() mm = mmap.MmapTracker() numsample = 0 for n in range(0, len(ev)): mm.lookahead_mmap(ev, n) j = ev[n] print(j) if j.type != "SAMPLE": continue numsample += 1 mm.update_sample(j) def add(k, i): data[k].append(i) used[k] += 1 filename, mmap_base, foffset = mm.resolve(j.pid, j.ip) if filename == "[kernel.kallsyms]_text": filename = None add('filename', filename) sym, soffset, line = elf.resolve_ip(filename, foffset, j.ip, need_line) add('symbol', sym) add('line', line) add('soffset', soffset) if 'callchain' in j and j.callchain: id = callchains.add(j.callchain.caller, lambda: resolve_chain(j.callchain, j, mm, need_line)) add('callchain', id) if 'branch_stack' in j and j.branch_stack: branch = j.branch_stack.branch id = branches.add(map(lambda x: (x['from'], x.to), branch), lambda: resolve_branch(branch, j, mm, need_line)) add('branch', id) kernel, guest, hv = cpumodes[j['cpumode']] add('kernel', kernel) add('guest', guest) add('hv', hv) for name in j: if name not in ignored: if j[name]: 
used[name] += 1 data[name].append(j[name]) index.append(int(j["time"])) for j in data.keys(): if used[j] == 0: del data[j] df = pd.DataFrame(data, index=index, dtype=np.uint64) if numsample > 0: for i in bool_fields: df[i] = df[i].astype('bool') df.branch_aux = branches df.callchain_aux = callchains return df def read_samples(fn, need_line=True): with open(fn, "rb") as f: h = perfdata.perf_file.parse_stream(f) df = samples_to_df(h, need_line) return df, h.attrs.perf_file_attr.f_attr, h.features if __name__ == '__main__': import argparse import sys args = argparse.ArgumentParser() args.add_argument('file', nargs='?', help='perf.data file to read', default='perf.data') args.add_argument('--repl', action='store_true', help='start python shell with data') args.add_argument('--ipython', action='store_true', help='start ipython shell with data') p = args.parse_args() df, _, _ = read_samples(p.file) if p.repl: import code print(df) code.interact(banner='perf.data is in df', local=locals()) sys.exit(0) if p.ipython: try: from IPython.terminal.embed import InteractiveShellEmbed except NameError: sys.exit("Ipython not installed") print(df) ipshell = InteractiveShellEmbed(banner1="perf.data is in df") ipshell() sys.exit(0) print(df) if 'filename' in df: print(df['filename'].value_counts()) if 'symbol' in df: print(df['symbol'].value_counts()) if 'line' in df: print(df['line'].value_counts())
6,496
Python
.py
186
27.408602
79
0.584726
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,943
kernel.py
andikleen_pmu-tools/parser/kernel.py
#!/usr/bin/env python # resolve kernel symbols through kallsyms (when no vmlinux is available) # # Copyright (c) 2014, Intel Corporation # Author: Andi Kleen # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. import util kernel = [] def parse_kernel(): with open("/proc/kallsyms", 'r') as f: for l in f: n = l.split() addr = int(n[0], 16) kernel.append((addr, n[2])) def resolve_kernel(ip): if not kernel: parse_kernel() n = util.find_le(kernel, ip) if n: return n[1], ip - n[0] return None
966
Python
.py
29
29.413793
75
0.691318
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,944
perfdata.py
andikleen_pmu-tools/parser/perfdata.py
#!/usr/bin/env python # A description of the perf.data file format in "construct" # # Copyright (c) 2011-2013, Intel Corporation # Author: Andi Kleen # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # Only works on Little-Endian with LE input files. Sorry. # # TBD: # Generic Bitfield adapter that handles endian properly? # check size in all cases (or use optional+tunnel) # tracing support # processor trace (aux) data and extra info headers # sample_id is not handled correctly in all cases # from __future__ import print_function from construct import (If, Embedded, Struct, SNInt32, UNInt64, Flag, BitStruct, Padding, Enum, Array, Bytes, Anchor, UNInt32, GreedyRange, CString, PrefixedArray, TunnelAdapter, Magic, Pointer, BitField, UNInt16, HexDumpAdapter, String, Pass, Value, Switch) def sample_type(ctx): return ctx.attr.perf_event_attr.sample_type def sample_id_size(ctx): st = sample_type(ctx) return (st.tid + st.time + st.id + st.stream_id + st.cpu + st.identifier) * 8 def sample_id(): return If(lambda ctx: True, # xxx check size Embedded(Struct("id_all", If(lambda ctx: sample_type(ctx).tid, Embedded(Struct("pid2", SNInt32("pid2"), SNInt32("tid2")))), If(lambda ctx: sample_type(ctx).time, UNInt64("time2")), If(lambda ctx: sample_type(ctx).id, UNInt64("id2")), If(lambda ctx: sample_type(ctx).stream_id, UNInt64("stream_id2")), If(lambda ctx: sample_type(ctx).cpu, Embedded(Struct("cpu", UNInt32("cpu2"), UNInt32("res")))), If(lambda ctx: sample_type(ctx).identifier, UNInt64("identifier2"))))) def fork_exit(name): return Struct(name, SNInt32("pid"), SNInt32("ppid"), SNInt32("tid"), 
SNInt32("ptid"), UNInt64("time"), sample_id()) def thread_map(): return Struct("thread_map", UNInt64("nr"), Array(lambda ctx: ctx.nr, Struct("thread_map_entry", UNInt64("pid"), String("comm", 16)))) def ignore(): return Bytes("data", lambda ctx: ctx.size - 8) def throttle(name): return Struct(name, UNInt64("time"), UNInt64("id"), UNInt64("stream_id"), sample_id()) def hweight64(ctx): return bin(ctx._.attr.perf_event_attr.sample_regs_user).count("1") def event(): return Embedded( Struct("event", If(lambda ctx: sample_type(ctx).identifier, UNInt64("identifier")), If(lambda ctx: sample_type(ctx).ip, UNInt64("ip")), If(lambda ctx: sample_type(ctx).tid, Embedded(Struct("tid", SNInt32("pid"), SNInt32("tid")))), If(lambda ctx: sample_type(ctx).time, UNInt64("time")), If(lambda ctx: sample_type(ctx).addr, UNInt64("addr")), If(lambda ctx: sample_type(ctx).id, UNInt64("id")), If(lambda ctx: sample_type(ctx).stream_id, UNInt64("stream_id")), If(lambda ctx: sample_type(ctx).cpu, Embedded(Struct("cpu", UNInt32("cpu"), UNInt32("res")))), If(lambda ctx: sample_type(ctx).period, UNInt64("period")), If(lambda ctx: sample_type(ctx).read, read_format()), If(lambda ctx: sample_type(ctx).callchain, Struct("callchain", UNInt64("nr"), Array(lambda ctx: ctx.nr, UNInt64("caller")))), If(lambda ctx: sample_type(ctx).raw, Struct("raw", UNInt32("size"), Bytes("raw", lambda ctx: ctx.size))), If(lambda ctx: sample_type(ctx).branch_stack, Struct("branch_stack", UNInt64("nr"), Array(lambda ctx: ctx.nr, Struct("branch", UNInt64("from"), UNInt64("to"), # Little-Endian! 
BitStruct("flags", Padding(4), Flag("abort"), Flag("in_tx"), Flag("predicted"), Flag("mispred"), Padding(64 - 1 * 8)))))), If(lambda ctx: sample_type(ctx).regs_user, Struct("regs_user", Enum(UNInt64("abi"), NONE = 0, ABI_32 = 1, ABI_64 = 2), Array(lambda ctx: hweight64(ctx), UNInt64("reg")))), If(lambda ctx: sample_type(ctx).stack_user, Struct("stack_user", UNInt64("size"), Bytes("data", lambda ctx: ctx.size), UNInt64("dyn_size"))), If(lambda ctx: sample_type(ctx).weight, UNInt64("weight")), If(lambda ctx: sample_type(ctx).data_src, UNInt64("data_src")), If(lambda ctx: sample_type(ctx).transaction, UNInt64("transaction")), If(lambda ctx: sample_type(ctx).regs_intr, Struct("regs_intr", Enum(UNInt64("abi"), NONE = 0, ABI_32 = 1, ABI_64 = 2), Array(lambda ctx: hweight64(ctx), UNInt64("reg")))), Anchor("end_event"), Padding(lambda ctx: max(0, ctx.size - ctx.end_event)))) def get_attr_list(ctx): return ctx._._.attrs.perf_file_attr.f_attr # assume that sample_id_all is the same in all events # we cannot look up the event without this. 
def has_sample_id_all(ctx): attr = get_attr_list(ctx)[0] if 'sample_id_all' in attr: return attr.sample_id_all return False # when sample_id_all is not supported, we may # not look up the right one (perf.data limitation) def lookup_event_attr(ctx): if "end_id" in ctx and ctx.end_id: idx = ctx.end_id elif 'id' in ctx and ctx['id']: idx = ctx['id'] else: idx = 0 return get_attr_list(ctx)[idx] # XXX need to make OnDemand for large files def perf_event_header(): return Embedded(Struct(None, Enum(UNInt32("type"), MMAP = 1, LOST = 2, COMM = 3, EXIT = 4, THROTTLE = 5, UNTHROTTLE = 6, FORK = 7, READ = 8, SAMPLE = 9, MMAP2 = 10, RECORD_AUX = 11, ITRACE_START = 12, LOST_SAMPLES = 13, SWITCH = 14, SWITCH_CPU_WIDE = 15, NAMESPACES = 16, KSYMBOL = 17, BPF_EVENT = 18, CGROUP = 19, HEADER_ATTR = 64, HEADER_EVENT_TYPE = 65, TRACING_DATA = 66, HEADER_BUILD_ID = 67, FINISHED_ROUND = 68, ID_INDEX = 69, AUXTRACE_INFO = 70, AUXTRACE = 71, AUXTRACE_ERROR = 72, THREAD_MAP = 73, CPU_MAP = 74, STAT_CONFIG = 75, STAT = 76, STAT_ROUND = 77, EVENT_UPDATE = 78, TIME_CONV = 79, HEADER_FEATURE = 80, COMPRESSED = 81), Embedded(BitStruct(None, Padding(1), Enum(BitField("cpumode", 7), UNKNOWN = 0, KERNEL = 1, USER = 2, HYPERVISOR = 3, GUEST_KERNEL = 4, GUEST_USER = 5), Flag("ext_reserved"), Flag("exact_ip"), Flag("mmap_data"), Padding(5))), UNInt16("size"), If(has_sample_id_all, Pointer(lambda ctx: ctx.start + ctx.size - 8, UNInt64("end_id"))), Value("attr", lookup_event_attr))) def mmap(): return Struct("mmap", SNInt32("pid"), SNInt32("tid"), UNInt64("addr"), UNInt64("len"), UNInt64("pgoff"), Anchor("start_of_filename"), CString("filename"), Anchor("end_of_filename"), # hack for now. this shouldn't be needed. 
If(lambda ctx: True, # XXX Embedded(Pointer(lambda ctx: ctx.size + ctx.start - sample_id_size(ctx), sample_id())))) def mmap2(): return Struct("mmap2", SNInt32("pid"), SNInt32("tid"), UNInt64("addr"), UNInt64("len"), UNInt64("pgoff"), UNInt32("maj"), UNInt32("min"), UNInt64("ino"), UNInt64("ino_generation"), UNInt32("prot"), UNInt32("flags"), CString("filename"), sample_id()) def read_flags(ctx): return ctx._.attr.read_format def enabled_running(): return Struct("enabled_running", If(lambda ctx: read_flags(ctx).total_time_enabled, UNInt64("total_time_enabled")), If(lambda ctx: read_flags(ctx).total_time_running, UNInt64("total_time_running"))) def read_format(): return Struct("read", If(lambda ctx: read_flags(ctx).group, Struct("group", UNInt64("nr"), Embedded(enabled_running()), Array(lambda ctx: ctx.nr, Struct("val", UNInt64("value"), If(lambda ctx: read_flags(ctx).id, UNInt64("id2")))))), If(lambda ctx: not read_flags(ctx).group, Struct("single", UNInt64("value"), Embedded(enabled_running()), If(lambda ctx: read_flags(ctx).id, UNInt64("id2"))))) def time_conv(): return Struct("time_conv", UNInt64("time_shift"), UNInt64("time_mult"), UNInt64("time_zero")) def perf_event(): return Struct("perf_event", Anchor("start"), perf_event_header(), Anchor("header_end"), Switch("data", lambda ctx: ctx.type, { "MMAP": mmap(), # noqa E121 "MMAP2": mmap2(), "LOST": Struct("lost", UNInt64("id"), UNInt64("lost"), sample_id()), "COMM": Struct("comm", SNInt32("pid"), SNInt32("tid"), CString("comm"), sample_id()), "EXIT": fork_exit("exit"), "THROTTLE": throttle("throttle"), "UNTHROTTLE": throttle("unthrottle"), "FINISHED_ROUND": Pass, "FORK": fork_exit("fork"), "READ": Embedded(Struct("read_event", SNInt32("pid"), SNInt32("tid"), read_format(), sample_id())), "SAMPLE": event(), "TIME_CONV": time_conv(), "THREAD_MAP": thread_map(), # below are the so far not handled ones. 
Dump their # raw data only "RECORD_AUX": ignore(), "AUX": ignore(), "ITRACE_START": ignore(), "LOST_SAMPLES": ignore(), "SWITCH": ignore(), "SWITCH_CPU_WIDE": ignore(), "NAMESPACES": ignore(), "KSYMBOL": ignore(), "BPF_EVENT": ignore(), "CGROUP": ignore(), "HEADER_ATTR": ignore(), "HEADER_EVENT_TYPE": ignore(), "TRACING_DATA": ignore(), "HEADER_BUILD_ID": ignore(), "ID_INDEX": ignore(), "AUXTRACE_INFO": ignore(), "AUXTRACE": ignore(), "AUXTRACE_ERROR": ignore(), "CPU_MAP": ignore(), "STAT": ignore(), "STAT_ROUND": ignore(), "EVENT_UPDATE": ignore(), "HEADER_FEATURE": ignore(), "COMPRESSED": ignore(), }), Anchor("end"), Padding(lambda ctx: ctx.size - (ctx.end - ctx.start))) def perf_event_seq(attr): return GreedyRange(perf_event(attr)) perf_event_attr_sizes = (64, 72, 80, 96, 104) perf_event_attr = Struct("perf_event_attr", Anchor("start"), Enum(UNInt32("type"), HARDWARE = 0, SOFTWARE = 1, TRACEPOINT = 2, HW_CACHE = 3, RAW = 4, BREAKPOINT = 5), UNInt32("size"), UNInt64("config"), UNInt64("sample_period_freq"), # must be in LE order, original is a u64 # each byte is reversed BitStruct("sample_type", Flag("cpu"), Flag("id"), Flag("callchain"), Flag("read"), Flag("addr"), Flag("time"), Flag("tid"), Flag("ip"), Flag("data_src"), Flag("weight"), Flag("stack_user"), Flag("regs_user"), Flag("branch_stack"), Flag("raw"), Flag("stream_id"), Flag("period"), Padding(5), Flag("regs_intr"), Flag("transaction"), Flag("identifier"), Padding(64 - 3 * 8)), BitStruct("read_format", Padding(4), Flag("group"), Flag("id"), Flag("total_time_running"), Flag("total_time_enabled"), Padding(64 - 1*8)), Embedded(BitStruct(None, Flag("disabled"), Flag("inherit"), Flag("pinned"), Flag("exclusive"), Flag("exclude_user"), Flag("exclude_kernel"), Flag("exclude_hv"), Flag("exclude_idle"), Flag("mmap"), Flag("comm"), Flag("freq"), Flag("inherit_stat"), Flag("enable_on_exec"), Flag("task"), Flag("watermark"), BitField("precise_ip", 2), Flag("mmap_data"), Flag("sample_id_all"), Flag("exclude_host"), 
Flag("exclude_guest"), Flag("exclude_callchain_kernel"), Flag("exclude_callchain_user"), Padding(41))), UNInt32("wakeup_events"), UNInt32("bp_type"), UNInt64("config1"), If(lambda ctx: ctx.size >= perf_event_attr_sizes[1], UNInt64("config2")), If(lambda ctx: ctx.size >= perf_event_attr_sizes[2], UNInt64("branch_sample_type")), If(lambda ctx: ctx.size >= perf_event_attr_sizes[3], Embedded(Struct(None, UNInt64("sample_regs_user"), UNInt32("sample_stack_user"), UNInt32("__reserved_2")))), If(lambda ctx: ctx.size >= perf_event_attr_sizes[4], UNInt64("sample_regs_intr")), Anchor("end"), Value("perf_event_attr_size", lambda ctx: ctx.end - ctx.start), Padding(lambda ctx: ctx.size - ctx.perf_event_attr_size)) def pad(l = "len"): return Padding(lambda ctx: ctx[l] - (ctx.offset - ctx.start)) def str_with_len(name): return Struct(name, UNInt32("len"), Anchor("start"), CString(name), Anchor("offset"), pad()) def feature_string(name): return If(lambda ctx: ctx._[name], perf_file_section(name, Embedded(Struct(name, UNInt32("len"), CString(name))))) def string_list(name, extra = Pass): return PrefixedArray(Struct(name, UNInt32("len"), Anchor("start"), CString(name), Anchor("offset"), pad(), extra), UNInt32("nr")) def numa_topology(): return PrefixedArray(Struct("node", UNInt32("nodenr"), UNInt64("mem_total"), UNInt64("mem_free"), str_with_len("cpus")), UNInt32("nr")) def group_desc(): return string_list("group_desc", Embedded(Struct(None, UNInt32("leader_idx"), UNInt32("nr_members")))) def build_id(): return Struct("build_id", Anchor("start"), UNInt32("type"), UNInt16("misc"), UNInt16("size"), SNInt32("pid"), HexDumpAdapter(String("build_id", 24)), CString("filename"), Anchor("offset"), pad("size")) def section_adapter(name, target): return perf_file_section(name, TunnelAdapter(String("data", lambda ctx: ctx.size), target)) def pmu_mappings(): return PrefixedArray(Struct("pmu", UNInt32("type"), str_with_len("name")), UNInt32("nr")) def event_desc(): return Struct("event_desc", 
UNInt32("nr"), UNInt32("attr_size"), Array(lambda ctx: ctx.nr, Struct("desc", perf_event_attr, UNInt32("nr_ids"), str_with_len("event"), Array(lambda ctx: ctx.nr_ids, UNInt64("id"))))) def perf_features(): return Struct("features", # XXX If(lambda ctx: ctx._.tracing_data, perf_file_section("tracing_data", Pass)), If(lambda ctx: ctx._.build_id, section_adapter("build_id", GreedyRange(build_id()))), feature_string("hostname"), feature_string("osrelease"), feature_string("version"), feature_string("arch"), If(lambda ctx: ctx._.nrcpus, perf_file_section("nrcpus", Embedded(Struct("nrcpus", UNInt32("nr_cpus_online"), UNInt32("nr_cpus_avail"))))), feature_string("cpudesc"), feature_string("cpuid"), If(lambda ctx: ctx._.total_mem, perf_file_section("total_mem", UNInt64("total_mem"))), If(lambda ctx: ctx._.cmdline, perf_file_section("cmdline", string_list("cmdline"))), If(lambda ctx: ctx._.event_desc, perf_file_section("event_desc", event_desc())), If(lambda ctx: ctx._.cpu_topology, perf_file_section("cpu_topology", Struct("cpu_topology", string_list("cores"), string_list("threads")))), If(lambda ctx: ctx._.numa_topology, perf_file_section("numa_topology", numa_topology())), # not implemented in perf If(lambda ctx: ctx._.branch_stack, perf_file_section("branch_stack", Pass)), If(lambda ctx: ctx._.pmu_mappings, perf_file_section("pmu_mappings", pmu_mappings())), If(lambda ctx: ctx._.group_desc, perf_file_section("group_desc", group_desc()))) def perf_file_section(name, target): return Struct(name, UNInt64("offset"), UNInt64("size"), Pointer(lambda ctx: ctx.offset, target)) id_array = Array(lambda ctx: ctx.size / 8, UNInt64("id")) def num_attr(ctx): return ctx._.size / ctx._._.attr_size perf_file_attr = Struct("perf_file_attr", Array(lambda ctx: num_attr(ctx), Struct("f_attr", perf_event_attr, perf_file_section("ids", id_array)))) perf_event_types = Struct("perf_file_attr", Anchor("here"), Padding(lambda ctx: ctx._.size)) perf_data = TunnelAdapter(Bytes("perf_data", lambda 
ctx: ctx.size), GreedyRange(perf_event())) #OnDemand(Bytes("perf_data", lambda ctx: ctx.size)) perf_file = Struct("perf_file_header", # no support for version 1 Magic("PERFILE2"), UNInt64("size"), UNInt64("attr_size"), perf_file_section("attrs", perf_file_attr), perf_file_section("data", perf_data), perf_file_section("event_types", perf_event_types), # little endian Embedded(BitStruct(None, Flag("nrcpus"), Flag("arch"), Flag("version"), Flag("osrelease"), Flag("hostname"), Flag("build_id"), Flag("tracing_data"), Flag("reserved"), Flag("branch_stack"), Flag("numa_topology"), Flag("cpu_topology"), Flag("event_desc"), Flag("cmdline"), Flag("total_mem"), Flag("cpuid"), Flag("cpudesc"), Padding(6), Flag("group_desc"), Flag("pmu_mappings"), Padding(256 - 3*8))), Pointer(lambda ctx: ctx.data.offset + ctx.data.size, perf_features()), Padding(3 * 8)) def get_events(h): return h.data.perf_data if __name__ == '__main__': import argparse args = argparse.ArgumentParser() args.add_argument('file', help='perf.data to read', default='perf.data', nargs='?') p = args.parse_args() with open(p.file, "rb") as f: h = perf_file.parse_stream(f) print(h) #print(get_events(h))
30,404
Python
.py
629
23.766296
82
0.357525
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,945
hist.py
andikleen_pmu-tools/parser/hist.py
#!/usr/bin/env python # print histogram for perf.data from __future__ import print_function import perfpd import pfeat import argparse p = argparse.ArgumentParser(description='Print histogram for perf.data') p.add_argument('datafiles', nargs='*', help='perf.data files (default perf.data)', default=['perf.data']) p.add_argument('--sort', help='field to sort on (symbol, line)', default='symbol') p.add_argument('--min-percent', help='Minimum percent to print', default=1.0) args = p.parse_args() COLUMN_PAD = 5 MAX_COLUMN = 70 def compute_cols(names): return min(max(map(len, names)) + COLUMN_PAD, MAX_COLUMN) min_percent = float(args.min_percent) / 100.0 for d in args.datafiles: df, et, feat = perfpd.read_samples(d, (args.sort == 'line')) pfeat.print_feat(feat) # xxx split by event if 'period' in df: total = float(df['period'].sum()) g = df.groupby(args.sort) h = g.period.sum() h.sort(ascending=False) h = h.apply(lambda x: x / total) else: h = df[args.sort].value_counts(normalize=True) h = h[h >= min_percent] cols = compute_cols(h.index) for s, v in zip(h.index, h.values): print("%-*s %.2f%%" % (cols, s, v * 100.0))
1,271
Python
.py
35
31.228571
77
0.636585
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,946
mmap.py
andikleen_pmu-tools/parser/mmap.py
# track mmap updates in a perf stream and allow lookup of symbols # # Copyright (c) 2013-2014, Intel Corporation # Author: Andi Kleen # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. from collections import defaultdict import bisect # max reorder window for MMAP updates LOOKAHEAD_WINDOW = 1024 def lookup(m, ip): i = bisect.bisect_left(m, (ip,)) if i < len(m) and m[i][0] == ip: mr = m[i] elif i == 0: return None, 0 else: mr = m[i - 1] return mr, ip - mr[0] class MmapTracker: """Track mmap updates in a perf stream and allow lookup of symbols.""" def __init__(self): self.maps = defaultdict(list) self.pnames = defaultdict(str) self.lookahead = 0 self.updates = [] # look ahead for out of order mmap updates def lookahead_mmap(self, ev, n): if n - self.lookahead == 0: self.lookahead = min(n + LOOKAHEAD_WINDOW, len(ev)) for l in range(n, self.lookahead): j = ev[l] # no time stamp: assume it's synthesized and kernel if j.type in ('MMAP','MMAP2') and j.pid == -1 and j.tid == 0: bisect.insort(self.maps[j.pid], (j.addr, j.len, j.filename)) elif j.type in ('COMM','MMAP','MMAP2'): bisect.insort(self.updates, (j.time2, j)) # process pending updates for a sample def update_sample(self, j): updates = self.updates while len(updates) > 0 and j.time >= updates[0][0]: u = updates[0][1] del updates[0] if u.type in ('MMAP', 'MMAP2'): pid = u.pid bisect.insort(self.maps[pid], (u.addr, u.len, u.filename)) elif u.type == 'COMM': self.maps[u.pid] = [] self.pnames[u.pid] = u.comm # look up tables with current state def resolve(self, pid, ip): if not self.maps[pid]: # xxx kernel return None, None, 0 m, offset 
= lookup(self.maps[pid], ip) if not m or offset >= m[1]: # look up kernel m, offset = lookup(self.maps[-1], ip) if not m or offset >= m[1]: return None, None, 0 assert m[0] <= ip <= m[0] + m[1] return m[2], m[0], offset
2,751
Python
.py
70
30.728571
77
0.579125
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,947
topdown-csv.py
andikleen_pmu-tools/topdown-csv/topdown-csv.py
#!/usr/bin/env python3 # Copyright (c) 2012-2024, Intel Corporation # Author: Andi Kleen # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # convert topdown spreadsheet to python code # EVENTMAP=eventfile.json topdown-csv.py spreadsheet.csv > module.py CPU-acronym "long cpu name" # # somewhat of a mess. should really use a real parser. # # TOFIX: # use count domain number for sanity checks # special case leaking events with option? (l3 bound to memory bound) # figure out Cycles_False_Sharing_Client on HSX # uncore handle arb events directly # from __future__ import print_function import sys import csv import re import textwrap import argparse import json import itertools from copy import copy from string import Template T = Template sys.path.append("../pmu-tools") sys.path.append("..") import ocperf ap = argparse.ArgumentParser(usage='Convert topdown spreadsheet to python code. Specify EVENTMAP=eventfile.json. 
Spreadsheet must be in CSV format.') ap.add_argument('spreadsheet', type=argparse.FileType('r')) ap.add_argument('product_match') ap.add_argument('long_cpu_name') ap.add_argument('--product') ap.add_argument('--hybrid', action='store_true') ap.add_argument('--memory', default=0) ap.add_argument('--extra-match') ap.add_argument('--count-domain', default='Count Domain') ap.add_argument('-v', '--verbose', action='store_true') ap.add_argument('-j', '--json', type=argparse.FileType('w'), help="Output formulas and events in flat JSON format") ap.add_argument('--eventcsv', type=argparse.FileType('r'), help="Use CSV to convert events instead of JSON") ap.add_argument('--nosmt', action='store_true', help="Set default for EBS mode in model") ap.add_argument('--ignore-missing', action='store_true', help="Ignore missing events") args = ap.parse_args() if args.product: args.product_match = args.product emap = None if not args.eventcsv: emap = ocperf.find_emap() if not emap: sys.exit("Cannot find CPU map") # From "for tools developers" in TMA spreadsheet server_products = "JKT;IVT;HSX;BDX;SKX;CLX;CPX;ICX;SPR/EMR;SPR-HBM;EMR;GNR".split(";") client_products = "SNB;IVB;HSW;BDW;SKL;KBL;KBLR;CFL;CNL;ICL;RKL;RKL;TGL;ADL;MTL;LNL".split(";") server_products.reverse() client_products.reverse() server_client = { "SPR": "ADL", "ICX": "ICL", "CPX": "KBLR", "CLX": "KBLR", "SKX": "SKL", "BDX": "BDW", "HSX": "HSW", "IVT": "IVB", "JKT": "SNB", "EMR": "ADL", "GNR": "MTL", "SPR-HBM": "ADL", "SPR/EMR": "ADL", } clients_with_server = set(server_client.values()) ratio_column = { "TNT": ("TNT", ), "CMT": ("CMT",), args.product_match: (args.product_match, ) } for i, name in enumerate(client_products): ratio_column[name] = client_products[i:] for i, name in enumerate(server_products): ratio_column[name] = [] for server in server_products[i:]: client = server_client[server] seen = set() ratio_column[name] += [server, client] j = client_products.index(client) + 1 while j < len(client_products) and 
client_products[j] not in clients_with_server: if client_products[j] not in seen: ratio_column[name].append(client_products[j]) seen.add(client_products[j]) j += 1 aliases = { "SPR": "SPR/EMR", "CFL": "KBLR/CFL/CML", "KBLR": "KBLR/CFL/CML", "ADL": "ADL/RPL", "SKL": "SKL/KBL", "KBL": "SKL/KBL", "JKT": "JKT/SNB-EP", "EMR": "SPR/EMR", #"SPR-HBM": "SPR", "LNL": "LNL/ARL", } if args.product_match in aliases and aliases[args.product_match] in ratio_column: args.product_match = aliases[args.product_match] #import pprint #pprint.pprint(ratio_column, stream=sys.stderr) print("Used columns", " ".join(ratio_column[args.product_match]), file=sys.stderr) match_products = set(ratio_column.keys()) topdown_use_fixed = False aux_skip = set(["SMT_on", "Base_Frequency", "EBS_Mode", "PERF_METRICS_MSR"]) #fixes = { #} event_replace = ( ) event_fixes = ( ("_UOPS_", "_UOP_"), ("TOPDOWN.SLOTS:perf_metrics", "slots"), ) special_events = { "DurationTimeInMiliSeconds": ("interval-ms", ), "DurationTimeInMilliSeconds": ("interval-ms", ), "DurationTimeInSeconds": ("interval-s", ), "TSC": ("msr/tsc/", ), } extra_desc = { } csv_fixes = { } env_var = { "NUM_CORES": "num_cores", "NUM_THREADS": "num_threads", "Base_Frequency": "base_frequency", "SMT_on": "smt_enabled", "PERF_METRICS_MSR": "topdown_use_fixed", "EBS_Mode": "ebs_mode", "NUM_SOCKETS": "num_sockets", } def read_eventcsv(e): if not args.eventcsv: return {} allevents = dict() c = csv.DictReader(args.eventcsv) for j in c: if "SimEvent" in j: allevents[j["Name"]] = j["SimEvent"] if "CohoStat" in j: ev = j["CohoStat"].replace("p0.c0.t0.", "") si = j['SiliconEventName'] allevents[si] = ev allevents[j["EventName"]] = ev allevents["%s.%s" % (j["MainEventName"], j["SubEventName"])] = ev if si.endswith("_P"): allevents[si[:-2]] = ev if si in csv_fixes: allevents[csv_fixes[si]] = ev return allevents allevents = read_eventcsv(args.eventcsv) class Rule: pass class BadEvent(Exception): pass def findevent(ev): if emap: return emap.getevent(ev) is not 
None if args.eventcsv: def munge(e): if e == "": return None return e if ev in allevents: return munge(allevents[ev]) ev = ev.lower() if ev in allevents: return munge(allevents[ev]) return None sys.exit("No events") def find_replacement(t, what): if findevent(t + "_0"): print("replaced %s event %s with %s" % (what, t, t + "_0"), file=sys.stderr) return t + "_0" if "OCR" in t: nn = t.replace("OCR", "OFFCORE_RESPONSE") else: nn = t.replace("OFFCORE_RESPONSE", "OCR") if findevent(nn): print("replaced %s event %s with %s" % (what, t, nn), file=sys.stderr) return nn for e, r in event_fixes: nt = t.replace(e, r) if not args.hybrid: nt = nt.replace("cpu_core", "cpu") if nt.startswith("topdown-") or nt == "slots" or nt.startswith("cpu"): return nt if findevent(nt): print("replaced %s event %s with %s" % (what, t, nt), file=sys.stderr) return nt return None def verify_event(t, what): #for e, r in event_replace: # t = t.replace(e, r) extra = "" m = re.match(r'(.*?)([:/].*)', t) if m: t = m.group(1) extra = m.group(2) e = findevent(t) if e is None: if t.startswith("UNC"): print("not found", t, file=sys.stderr) if t.endswith("_ANY"): return verify_event(t.replace("_ANY", ":amt1"), what) nt = find_replacement(t, what) if not nt: print("WARNING ",product_match,extra,"cannot find %s event %s" % (what, t), file=sys.stderr) if args.ignore_missing: return "0" raise BadEvent() t = nt if isinstance(e, str): t = e if extra: if extra == "sup" or extra == "SUP": extra = "k" if extra.startswith("/"): extra = ":" + extra[1:] t = t + extra return t groups = [] consts = [] aux = [] info = [] names = {} deleted = [] aux_names = set() not_aux_areas = set() runtime_ids = set(("#SMT_on", "#SMT_On", "#EBS_Mode", "#PERF_METRICS_MSR",)) parent_stack = [] product_match = "" if args.product_match: product_match = args.product_match print("PRODUCT",product_match, file=sys.stderr) long_name = product_match if args.long_cpu_name: long_name = args.long_cpu_name skipped = 0 skipped_event = 0 def 
supported_event(ev): try: return verify_event(ev, "locate") except BadEvent: return None OP, ID, EVENT, MATCH, END = list(range(5)) toknames = ("op", "id", "event", "match", "end") class ParseError(BaseException): pass def parse_error(x, l=""): print("PARSE-ERROR", x, "%s %s" % (toknames[l[0]], l[1]) if l else "", file=sys.stderr) raise ParseError() def is_op(tok, m): return tok and tok[0] == OP and tok[1] == m def gettok(s): while len(s) > 0: if s[0] == " ": s = s[1:] continue if s[0] in "?:;/": tok = s[0] s = s[1:] yield (OP, tok) continue if s.startswith("N/A") or s.startswith("#NA"): s = s[3:] yield (EVENT, "N/A") continue if s.startswith("SPR-HBM"): s = s[len("SPR-HBM"):] yield (MATCH, "SPR-HBM") continue m = re.match(r'".*?"', s) if m: s = s[len(m.group(0)):] yield (EVENT, m.group(0)) m = re.match(r'[A-Z._][a-zA-Z0-9_.:=]*', s) if m: s = s[len(m.group(0)):] if m.group(0) in match_products or (m.group(0) in aliases and aliases[m.group(0)] in match_products): yield (MATCH, m.group(0)) else: yield (EVENT, m.group(0)) continue parse_error("lexing error " + s) yield (END, ) # locate = eventlist | match "?" eventlist ":" locate # eventlist = event { ";" event } def interpret_loc(l, product_match): if l.endswith("if #flavor > 'full_toplev' else #NA"): return ["#NA"] try: lexer = gettok(l) tok = next(lexer) if tok[0] == END: return [] l = parse_expr(lexer, tok, product_match) except ParseError as e: import traceback print(e, file=sys.stderr) print(l, file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) return [] return l[0] def parse_eventlist(lexer, tok): if tok[0] != EVENT: parse_error("expect event", tok) l = [tok[1]] tok = next(lexer) while is_op(tok, ";"): tok = next(lexer) if tok[0] != EVENT: parse_error("expect event", tok) l.append(tok[1]) tok = next(lexer) return l, tok def check_product(products): # XXX do inheritance here for ICL? 
return product_match in products def parse_expr(lexer, first, product_match): if first[0] == MATCH: products = [first[1]] tok = next(lexer) while is_op(tok, "/"): tok = next(lexer) if tok[0] != MATCH: parse_error("expect match", tok) products.append(tok[1]) tok = next(lexer) if not is_op(tok, "?"): parse_error("expect ?", tok) tok = next(lexer) if check_product(products): return parse_eventlist(lexer, tok) else: _, tok = parse_eventlist(lexer, tok) if not is_op(tok, ":"): parse_error("expect :", tok) tok = next(lexer) return parse_expr(lexer, tok, product_match) elif first[0] == EVENT: return parse_eventlist(lexer, first) else: parse_error("parse error before " + repr(first), first) def compile_locate(locate): locate = interpret_loc(locate, product_match) locate = [x for x in locate if x not in ("", "N/A", "#NA")] locate = [x.replace("_PS", ":pp").strip() for x in locate] locate = list(map(supported_event, locate)) locate = [x for x in locate if x] return locate # ([SNB+] ...; [HSW+] ...) -> (...) with right product def fix_desc(desc): def change(m): if not re.search(r'\[[A-Z]{3,3}\+?\]', m.group(0)): return m.group(0) desc = "" matchlen = 1000 allseq = [] for j in m.group(0)[1:-1].split(";"): m2 = re.match(r"\s*\[([A-Z]{3,3}\+?)\](.*)", j) if m2 is None: desc = j continue seq = m2.group(1) allseq.append(seq) if seq.endswith("+"): seq = seq.replace("+", "") p = ratio_column[product_match] # pick the shortest distance if seq in p and p.index(seq) < matchlen: desc = m2.group(2) matchlen = p.index(seq) else: if seq not in ratio_column: print("Unknown desc match", seq, file=sys.stderr) if seq == product_match: desc = m2.group(2) matchlen = 1 return desc n = re.sub(r'\((.*?)\)', change, desc.replace("\n", " ")) return n fields = None csvf = csv.reader(args.spreadsheet) tdversion = "" prevr = None for row in csvf: row = ["" if x == "." 
else x for x in row] if row[1] == "Version": tdversion = row[2] if row[0] == "Key": fields = dict([x for x in zip([k.strip() for k in row], list(range(0, len(row))))]) if args.verbose: print("fields", fields, file=sys.stderr) continue if len(row) < 8 or row[0] == "" or fields is None: if any([x == "" for x in row]): continue print("skipped", row, file=sys.stderr) skipped += 1 continue def translate(x): if x in fields: return x if x in aliases: return aliases[x] return None def get(x): f = translate(x) if f is None: sys.exit("Cannot find %s in %s" % (x, row)) return row[fields[f]] if get('Key') == "HW Info": skipped += 1 continue # does not work because perf cannot handle THREAD vs THREAD_P event if get('Level1') == "MUX": print("Skipped MUX", file=sys.stderr) skipped += 1 continue l = [None,"","","","","",""] r = Rule() r.type = get('Key').split(".")[0] r.fulltype = get('Key') for j in range(1,7): n = 'Level%d' % j if n in fields: l[j] = get(n).replace("/", "_") column = "?" for j in ratio_column[product_match]: if isinstance(j, list): for k in j: if k in fields: j = k break else: sys.exit("Cannot find column %s" % j) if j not in fields and j not in aliases: continue ratio = get(j) if ratio: column = j break if ratio == "": skipped_event += 1 continue if ratio.strip() == "N/A" or ratio.strip() == "#NA": skipped_event += 1 continue if 'Metric Description' in fields: r.desc = fix_desc(get('Metric Description')) else: r.desc = None r.thresh = get('Threshold') if 'public' in fields: r.public = get('public') else: r.public = "" if 'Metric Group' in fields: r.metricgroup = [x for x in get('Metric Group').strip().split(';') if x != ''] else: r.metricgroup = [] domain = get(args.count_domain) r.domain = domain if "Metric Max" in fields and get('Metric Max') != "": r.maxval = get('Metric Max').replace("#", "") else: r.maxval = "0" product = None if 'Product' in fields: product = get('Product') r.htoff = False if 'Comment' in fields: comment = get('Comment').split(";") 
r.htoff = "HToffHSW" in comment and product_match in ("HSW", "HSX") if 'Tuning Hint' in fields and len(get('Tuning Hint').strip()) > 0: r.desc += ". " + get('Tuning Hint') if 'Link' in fields and len(get('Link').strip()) > 0: r.desc += ". " + get('Link') if 'Locate-with' in fields: r.locate = compile_locate(get('Locate-with')) else: r.locate = None for level in range(1,7): if l[level] and not l[level].startswith("*"): break r.level = level r.pname = re.sub(r'\(.*\)', '', l[level]).strip() if r.pname == "": continue if r.pname == "SW info": r.pname = l[2] r.name = r.pname.replace(" ","").replace("+", "PLUS") if r.desc is None: r.desc = r.name.replace("_", " ") if re.match(r"[0-9]", r.name): r.name = "G" + r.name r.name = r.name.replace("#","") if r.name in names and len(parent_stack) > 0: r.name = parent_stack[-1] + r.name r.prevname = None if prevr: r.prevname = prevr.name prevr = r if r.name in extra_desc and r.desc.startswith("Reuse"): r.desc = extra_desc[r.name] if (r.name.startswith("IXP_") or r.name.startswith("MEM_IXP_")) and not args.memory: print("SKIPPED", r.name, file=sys.stderr) skipped_event += 1 continue while parent_stack and names[parent_stack[-1]].level >= r.level: parent_stack.pop() if parent_stack: r.parent = parent_stack[-1] else: assert r.level == 1 r.parent = None parent_stack.append(r.name) r.issue = None issue = r.thresh.split(";") r.overlap = False if len(issue) > 0: r.thresh = issue[0] if len(issue) > 1: issue = [x.strip() for x in issue[1:]] r.issue = set(issue) if "~overlap" in r.issue: r.overlap = True r.issue.discard("~overlap") r.rawratio = ratio r.ratio = ratio if "PERF_METRICS_MSR" in r.ratio: topdown_use_fixed = True if args.verbose: print(r.type,r.name,r.ratio,r.thresh,r.level, file=sys.stderr) if r.type == "Aux" and r.domain in ("Constant", "ExternalParameter"): r.type = "Constant" runtime_ids.add(r.name) names[r.name] = r if r.type == "Constant": consts.append(r) elif r.type == "Aux": if r.name not in aux_skip: aux.append(r) elif 
r.type in ("Info", "Model", "SW_Info", "Bottleneck"): info.append(r) elif r.type == "tool_Aux": continue else: groups.append(r) if r.type.endswith("_aux"): aux_names.add(r.name) # need to handle references first #not_aux_areas.add(r.type.replace("_aux", "")) if args.json: def resolve(v): v = v.group(0) if v == "Base_Frequency": return v if v[1:] in names: return names[v[1:]].ratio if v in names: if " " in names[v].ratio: return "( " + names[v].ratio + " )" return names[v].ratio return v def flatform(r): for i in range(8): oldr = r r = re.sub(r'#?[a-zA-Z_0-9]+', resolve, r) if oldr == r: break return r def clean(v): return {x: v[x] for x in list(v.keys()) if v[x] and v[x] != "0"} def jout(r): return clean({ "type": r.fulltype, "formula": flatform(r.rawratio), "oformula": r.rawratio, "name": r.name, "issue": list(r.issue) if r.issue else [], "overlap": r.overlap, "desc": r.desc, "parent": r.parent, "maxval": r.maxval, "metricgroup": r.metricgroup if r.metricgroup else [], "domain": r.domain, "level": r.level}) l = info + groups + consts l = [x for x in l if x.rawratio != "#NA"] json.dump(list(map(jout, l)), args.json, indent=4, separators=(',', ': '), sort_keys=True) def flatten(x): return itertools.chain(*x) def find_children(parent): return set(flatten([set([j.name]) for j in groups if j.parent == parent])) def change_token(t, other, level, nname=""): all_children = False max_children = False if t[:2] == "##": t = t[2:] all_children = True if t[:1] == "?": t = t[1:] max_children = True if t[:1] == "#": t = t[1:] if t in env_var: return env_var[t] if t in ("EV", "level"): return t if t == "Avg_run_time": # XXX return "0" if t == "NA": return "0" # or NaN? or throw? 
if t in special_events: e = 'EV("%s", 0)' % special_events[t][0] if len(special_events[t]) > 1: e += " / %g" % special_events[t][1] return e if t.startswith("EV("): return t if re.match(r'[A-Z\"]', t) and (t.find(".") >= 0 or t.startswith("UNC_")): m = re.match(r'"(.*)"', t) if m: t = m.group(1) t = t.replace("_PS", "") t = t.replace(":perf_metrics", "") t = verify_event(t, "node/metric %s" % nname) if t.startswith("PERF_METRICS") and not allevents: return '(EV("%s", %s) / EV("TOPDOWN.SLOTS", %s))' % (t, level, level) return 'EV("%s", %s)' % (t, level) if re.match(r"[A-Z_]+", t): if t not in names: if t.isupper() and args.ignore_missing: return "0" print("Warning: %s not found in names" % (t,), file=sys.stderr) raise BadEvent() if names[t].type == "Constant": return t def ref_node(t, other): # find all references to classes, updating other if names[t].type not in ("Info", "Aux", "Bottleneck"): other.add(t) _, o = compile_ratio(names[t].ratio, level, names[t].name) other |= o if names[t].type not in ("Aux", "Info", "Bottleneck"): return 'self.%s.compute(EV)' % (t,) return "%s(self, EV, %s)" % (t, level, ) if all_children: children = find_children(t) s = "( " + " + ".join([ref_node(j,other) for j in children]) + " )" if max_children: return "max(" + ref_node(t,other) + "," + s + ")" return s return ref_node(t, other) return t # handle lazy evaluation: list all events separately too # very hackish # XXX handles only single type of if # for runtime_mode identifiers don't duplicate counters because they are fixed at runtime def compile_extra(tokens, indent, levelstr, nname): tokens = compile_ratio_if(tokens) # doesn't handle mixed types of if for now if any([t in runtime_ids for t in tokens]): return "" if "if" in tokens: idx = tokens.index("if") if args.verbose: print("if", tokens[idx+1], tokens[idx+2], tokens[idx+1] not in runtime_ids, file=sys.stderr) if tokens[idx + 1] not in runtime_ids: try: events = set([x for x in tokens if change_token(x, set(), "0", "extra " + 
nname).count("EV") > 0 and "." in x]) except BadEvent: return "" aux_names = set([x.name for x in aux]) aux_calls = aux_names & set(tokens) return "".join([indent + 'EV("%s", %s)' % (x.replace("_PS",""), levelstr) for x in events] + [indent + '%s(self, EV, %s)' % (x, levelstr) for x in aux_calls]) return "" def tokenize(s): t = re.sub(r"#*([\(\)\[\]*+-,]|if|else|/(?!Match))", r" \1 ", s).split() return t def untokenize(tokens): r = " ".join(tokens) r = re.sub(r" ?([()]) ", r"\1", r) r = re.sub(r"\)([+-/,*] ?|if)", r") \1 ", r) r = r.replace(" ", " ") r = r.replace(")else", ") else") #r = re.sub(r"[^ ]([*/+-])", r" \1", r) #r = re.sub(r"([*/+-])[^ ]", r"\1 ", r) return r def compile_min(tokens): i = 0 while i < len(tokens): # min( CPU_CLK_UNHALTED.THREAD , x ) -> # EV ( lambda EV , level: min ( CPU_CLK_UNHALTED.THREAD , x ) , 0 ) if tokens[i:i+4] == ['min', '(', 'CPU_CLK_UNHALTED.THREAD', ','] and tokens[5] == ')': tokens = tokens[:i] + "EV ( lambda EV , level : ".split(" ") + tokens[i:i+5] + [")", ",", "level"] + tokens[i+5:] i += 7 i += 1 return tokens def compile_pebs(tokens): event = "missing" #if "$PEBS" in untokenize(tokens) and "$PEBS" not in tokens: # print >>sys.stderr, "compile_pebs tokens", tokens for j in range(len(tokens)): if "." in tokens[j]: event = tokens[j] if tokens[j] == "$PEBS": ev = verify_event(event, "timed pebs event") tokens[j] = 'EV("%s", 999)' % ev return tokens # [ "abc" "cdf" ] -> [ "abc", "cdf" ] def compile_list(tokens): listflag = [False] def remap(t): if t == '[': listflag[0] = True return t elif t == ']': listflag[0] = False if listflag[0]: return t + "," return t tokens = [remap(t) for t in tokens] return tokenize(untokenize(tokens)) def compile_ratio_if(tokens): # handle #PMM_App_Direct in [...] if. The expression needs to be split because the events are not in older CPUs. 
# these expressions can only be outter expessions, nesting is not supported start = 0 while "if" in tokens[start:]: ifind = tokens.index("if", start) if tokens[ifind+1] == "#PMM_App_direct": if tokens[ifind+2] != "else": print("ERROR expected else in", tokens, file=sys.stderr) if args.memory: tokens = tokens[:ifind] else: tokens = tokens[ifind+3:] break start = ifind + 1 return tokens def compile_na_if(tokens, prevname): # handle #NA if ... # which means the previous node in the level is used for the #NA (for HBM / DRAM) if len(tokens) > 2 and tokens[0] == "#NA" and tokens[1] == "if": tokens[0] = prevname return tokens def compile_ratio(r, level, nname, prevname="#NA"): other = set() tokens = tokenize(r) tokens = compile_list(tokens) tokens = compile_ratio_if(tokens) tokens = compile_na_if(tokens, prevname) tokens = compile_min(tokens) tokens = compile_pebs(tokens) tokens = [change_token(t, other, level, nname) for t in tokens] r = untokenize(tokens) return r, other def compile_thresh(t, other, level, fallback): if t == "": return fallback t = t.strip() m = re.match(r"\((.*)\)$", t) if m and m.group(1).find("(") < 0: t = m.group(1) #t = t.replace("(>", "(self.val >") t = t.replace("& P", "& self.parent.thresh") # dont want lazy evaluation t = t.replace("&", "and") t = t.replace("|", "or") t = t.replace("#", "") if t[0] in '<>' or (t[0] == '(' and t[1] in '<>'): m = re.match(r"(\(?)([<>]) ?([0-9.]+)(.*)", t) if m is not None: t = "%s(self.val %s %s)%s" % (m.group(1), m.group(2), m.group(3), m.group(4)) if t[:1] == "<": t = "self.val " + t tokens = re.sub(r"([()])", r" \1 ", t).split() for j in tokens: if j in names: if names[j].type not in ("Aux", "Constant", "Info", "tool_Aux", "Bottleneck"): t = t.replace(j, "self.%s.thresh" % (j)) other.add(j) elif names[j].type in ("Aux", "Info", "Bottleneck"): t = t.replace(j, j + "(self, EV, %s)" % (level,)) if j in deleted: raise BadEvent() return t def compile_desc(d): if d.startswith("Reuse"): return "" d = fix_desc(d) d = 
d.replace('"', r'\"') d = re.sub(r"#.*","", d) #d = re.sub(r"\..*", ".", d) d = d.replace("\n", " ") d = "\n".join(textwrap.wrap(d, 60)) #d = re.sub(r"Tip:.*", "", d) return '""\n' + d + '""' def compile_maxval(m): return re.sub(r"([a-zA-Z_][a-zA-Z0-9_]*)", lambda m: m.group(1) + "(0,0,0)" if m.group(1) not in [x.name for x in consts] else m.group(1), m) def title(p): print("\n# " + p + "\n") def kill(l, r, parents): if r.name not in names: return print("WARNING: removed %s (ratio %s)" % (r.name, r.ratio), file=sys.stderr) del l[l.index(r)] del names[r.name] deleted.append(r.name) if parents: for j in copy(l): if j.parent == r.name: kill(l, j, True) print(T("""\ # -*- coding: latin-1 -*- # # auto generated TopDown/TMA $tdversion description for $long_name # Please see http://ark.intel.com for more details on these CPUs. # # References: # http://bit.ly/tma-ispass14 # http://halobates.de/blog/p/262 # https://sites.google.com/site/analysismethods/yasin-pubs # https://download.01.org/perfmon/ # https://github.com/andikleen/pmu-tools/wiki/toplev-manual # # Helpers print_error = lambda msg: False$SMT version = "$tdversion" base_frequency = -1.0 Memory = $memory Average_Frequency = 0.0 num_cores = 1 num_threads = 1 num_sockets = 1 $topdown$aux def handle_error(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 obj.thresh = False def handle_error_metric(obj, msg): print_error(msg) obj.errcount += 1 obj.val = 0 """).substitute(long_name=long_name, tdversion=tdversion, memory=args.memory, topdown="topdown_use_fixed = False" if topdown_use_fixed else "", SMT=""" smt_enabled = False ebs_mode = False""" if not args.nosmt else "", aux="use_aux = False" if aux_names else "")) title("Constants") for r in consts: def fix_constant(s): s = s.replace("SMT_on", "smt_enabled") s = s.replace("PERF_METRICS_MSR", "topdown_use_fixed") s = s.replace("#", "") if re.match(r'^[A-Z]', s): return '"' + s + '"' return s print("%s = %s" % (r.name, fix_constant(r.ratio))) title("Aux. 
formulas") # prune rules with missing events def prune_rules(l): while True: changed = False for r in copy(l): if r.name in deleted: continue try: ratio, other = compile_ratio(r.ratio, "level", r.name, r.prevname) except BadEvent: kill(l, r, False) changed = True if not changed: break # remove unreferenced aux nodes def remove_unref(): ref = set() for r in aux + info + groups + consts: ref.update(tokenize(r.ratio)) ref.update(tokenize(r.thresh)) if r.maxval: ref.update(tokenize(r.maxval)) for r in copy(aux): if r.name not in ref and "#" + r.name not in ref: print("Removed unreferenced", r.name, file=sys.stderr) aux.remove(r) prune_rules(aux) prune_rules(info) remove_unref() for r in aux + info: extra = compile_extra(tokenize(r.ratio), "\n ", "level", r.name) ratio, other = compile_ratio(r.ratio, "level", r.name, r.prevname) print() if r.desc: print("# " + fix_desc(r.desc)) print("def %s(self, EV, level):%s" % (r.name, extra)) if r.thresh: thresh = compile_thresh(r.thresh, other, str(r.level), "True") print(" val = %s" % ratio) thresh = thresh.replace("self.val", "val") if r.type == "Aux": ratio = thresh elif r.type in ("Info", "Bottleneck"): print(" self.thresh = %s" % thresh) print(" return val") else: print(" return %s" % (ratio,)) title("Event groups") # prune rules with missing events while True: changed = False for r in copy(groups): if r.name in deleted: continue try: _, other = compile_ratio(r.ratio, str(r.level), r.name, r.prevname) compile_thresh(r.thresh, other, str(r.level), "self.val > 0.0 and self.parent.thresh") except BadEvent: kill(groups, r, True) changed = True if not changed: break for r in groups: extra = compile_extra(tokenize(r.ratio), "\n" + " " * 12, "%d" % r.level, r.name) ratio, r.other = compile_ratio(r.ratio, str(r.level), r.name, r.prevname) thresh = compile_thresh(r.thresh, r.other, str(r.level), "self.val > 0.0 and self.parent.thresh") print(T(""" class $name: name = "$pname" domain = "$domain" area = "$type" level = $level htoff = 
$htoff sample = $sample errcount = 0 sibling = None metricgroup = frozenset($metricgroup) maxval = $maxval def compute(self, EV): try: self.val = $ratio$extra self.thresh = $thresh except ZeroDivisionError: handle_error(self, "$name zero division") return self.val desc = "$desc" """).substitute(r.__dict__, desc=compile_desc(r.desc), ratio=ratio, thresh=thresh, extra=extra, sample=r.locate, server=r.public.find("pub") >= 0, metricgroup=r.metricgroup, maxval=compile_maxval(r.maxval) if r.maxval and r.maxval != "0" else "None")) for r in info: try: ratio, r.other = compile_ratio(r.ratio, str(r.level), r.name, r.prevname) except BadEvent: continue thresh = compile_thresh(r.thresh, r.other, str(r.level), "True") print(T(""" class Metric_$name: name = "$name" domain = "$domain" maxval = $maxval errcount = 0 area = "$fulltype" metricgroup = frozenset($metricgroup) sibling = None def compute(self, EV): try: self.val = $name(self, EV, 0) self.thresh = $thresh except ZeroDivisionError: handle_error_metric(self, "$name zero division") desc = "$desc" """).substitute(r.__dict__, maxval=compile_maxval(r.maxval) if r.maxval and r.maxval != "0" else "0", name=r.name, desc=compile_desc(r.desc), server=r.public.find("puc") >= 0, metricgroup=r.metricgroup, thresh=thresh)) changed = True while changed: changed = False for r in groups: if r.name in aux_names: continue for o in r.other: if o in aux_names: aux_names.remove(o) changed = True title("Schedule") print(T(""" class Setup: def __init__(self, r): o = dict()""").substitute(tdversion=tdversion)) def gen_aux_guard(r, r2=None): if r.name in aux_names or (r2 and r2.name in aux_names): sys.stdout.write(" if use_aux:\n ") if r.type in not_aux_areas or (r2 and r2.type in not_aux_areas): sys.stdout.write(" if not use_aux:\n ") for r in groups: gen_aux_guard(r) print((T(""" n = $name() ; r.run(n) ; o["$name"] = n"""). 
substitute(r.__dict__))) print() print(" # parents") print() for r in groups: if r.parent: gen_aux_guard(r, names[r.parent]) print(T(""" o["$name"].parent = o["$pname"]""").substitute( r.__dict__, pname=names[r.parent].name)) print() print(" # user visible metrics") print() for r in info: gen_aux_guard(r) print(T(""" n = Metric_$rname() ; r.metric(n) ; o["$rname"] = n""").substitute( rname=r.name, rdesc=compile_desc(r.desc))) print() print(" # references between groups") print() for r in groups + info: if 'other' not in r.__dict__: continue for on in r.other: o = names[on] gen_aux_guard(r) print(T(""" o["$rname"].$oname = o["$oname"]""").substitute( rname=r.name, oname=o.name)) print() print(" # siblings cross-tree") print() for r in groups + info: if r.issue: match = ['o["%s"]' % ox.name for ox in groups if r.issue and ox.issue and r.issue & ox.issue and ox.name != r.name] if len(match) > 0: gen_aux_guard(r) print(T(""" o["$rname"].sibling = $match""").substitute(rname=r.name, match="(" + ", ".join(match) + ",)")) if r.overlap: gen_aux_guard(r) print(T(""" o["$rname"].overlap = True""").substitute(rname=r.name)) #if r.issue in issues: # o = filter(lambda x: x != r, issues[r.issue]) # if len(o) > 0: # o = o[0] # else: # print >>sys.stderr, "issue", r.issue, "not found for", r.name # continue #else: # print >>sys.stderr, "issue", r.issue, "not found" # continue #if o.name in names: # if o.type not in ("Aux", "Info"): # print T(""" o["$rname"].sibling = o["$oname"]""").substitute( # rname=r.name, oname=o.name) #else: # continue # #print T(""" o["$rname"].sibling = None""").substitute(rname=r.name) else: continue #print T(""" o["$rname"].sibling = None""").substitute(rname=r.name) print("SKIPPED %d lines, %d events" % (skipped, skipped_event), file=sys.stderr)
37,164
Python
.py
1,076
27.273234
149
0.548789
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,948
mock.py
andikleen_pmu-tools/topdown-csv/mock.py
#!/usr/bin/env python3 # Do basic python sanity check of translation output import sys sys.path.append(".") import t l = [] m = [] def pev(e): print("\t",e) return 1 class R: def run(self, p): #print p l.append(p) def metric(self, p): m.append(p) t.Setup(R()) for p in l: p.thresh = True for p in l: print(p.name) p.compute(lambda e, level: pev(e)) if p.sample: print(" Sample:", " ".join(p.sample)) if p.sibling: print(" Siblings:", " ".join([o.name for o in p.sibling])) for p in m: print(p.name) p.compute(lambda e, level: pev(e))
629
Python
.py
29
17.62069
69
0.584874
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,949
ucevent.py
andikleen_pmu-tools/ucevent/ucevent.py
#!/usr/bin/env python # Copyright (c) 2013, Intel Corporation # Author: Andi Kleen # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # run perf with uncore events and output data in a vmstat/turbostat like running # normalized format. # # requires a perf driver for the uncore (recent Intel server CPUs) and may # need some kernel patches (see documentation) # # ucevent event .. -- perf arguments # e.g. ucevent event # no event => list # ucevent -h to show other arguments from __future__ import print_function import argparse import sys import textwrap import os import re import subprocess import pty import locale import copy import fnmatch import glob from collections import defaultdict import ucexpr import ucmsg dbg = ucmsg.debug_msg cpu_mapping = { 45: "jkt", 62: "ivt", 63: "hsx", 86: "bdxde", 79: "bdx", 85: "skx", 106: "icx", 108: "icx", } args = None per_socket = False class CPU: """Query CPU information.""" def cpumap(self): if (self.vendor == "GenuineIntel" and self.family == 6 and self.model in cpu_mapping): return cpu_mapping[self.model] print("Cannot identify CPU model %d" % (self.model)) return None # assumes that physical ids, nodes are all in the same name space def __init__(self): self.socket = dict() f = open("/proc/cpuinfo", "r") for l in f: n = l.split() if len(n) == 0: continue if n[0] == "processor": cpu = int(n[2]) elif n[0] == "physical" and n[1] == "id": s = int(n[3]) if s not in self.socket: self.socket[s] = cpu elif n[0] == "vendor_id": self.vendor = n[2] elif n[0] == "cpu" and n[1] == "family": self.family = int(n[3]) elif n[0] == "model" and n[1] == ":": 
self.model = int(n[2]) f.close() def max_node(self): return max(self.socket) def socket_to_cpu(self, s): if s in self.socket: return self.socket[s] sys.exit("Invalid socket %d" % s) cpu = CPU() cputype = os.getenv("FORCECPU") if not cputype: cputype = cpu.cpumap() if not cputype: sys.exit(1) if cputype == "skx": import skx_uc as uc import skx_extra as extra elif cputype == "bdx": import bdx_uc as uc import bdx_extra as extra elif cputype == "bdxde": import bdxde_uc as uc import bdxde_extra as extra elif cputype == "hsx": import hsx_uc as uc import hsx_extra as extra elif cputype == "ivt": import ivt_uc as uc import ivt_extra as extra elif cputype == "jkt": import jkt_uc as uc import jkt_extra as extra elif cputype == "icx": import icx_uc as uc import icx_extra as extra else: sys.exit("Unknown CPU " + cputype) import ucaux cpu_aux = ucaux.Aux() def lookup_event(name): if name in uc.events: return uc.events[name] if name in uc.derived: return uc.derived[name] return None def print_list(f, ls, c, description, equation, ehdr): count = 0 if description: wrap = textwrap.TextWrapper(initial_indent=" ", subsequent_indent=" ") for i in sorted(ls.keys()): ev = ls[i] if ev["Category"] == c: count += 1 desc = "?" if "Desc" in ev: desc = ev["Desc"] derived = "" if "Broken" in ev and not args.broken: continue if "Equation" in ev: if args.unsupported: derived = " (Derived)" elif "Obscure" in ev: continue elif not args.unsupported: continue if args.name_only: print(i, file=f) continue ehdr.out() print(" %-30s %-40s" % (i, desc + derived), file=f) if description: defn = "" if "Defn" in ev: defn = ev["Defn"] if "Notes" in ev: defn += " Notes: " + ev["Notes"] if "MaxIncCyc" in ev and ev["MaxIncCyc"] > 1: defn += (" May increase upto %d units per cycle." 
% (ev["MaxIncCyc"])) print(wrap.fill(defn), file=f) if "Equation" in ev: eql, equations = convert_equation(ev, dict(), True, True) q = set() for eq in eql: if eq not in ['[',']']: q |= get_qualifiers(perf_box(eq)) else: q = get_qualifiers(format_box(ev)) if q: print(wrap.fill("Qualifiers: " + " ".join(q)), file=f) if "Filter" in ev: print(wrap.fill("Filter: " + show_filter(ev["Filter"])), file=f) if equation: if "Equation" in ev: print(" Equation: ", ev["Equation"], file=f) return count def show_one_filter(f): if f in cpu_aux.qual_display_alias: return cpu_aux.qual_display_alias[f] q, v = ucexpr.convert_qual(f, "0") return q def show_filter(f): return " ".join(map(show_one_filter, f.split(","))) qual_cache = dict() def get_qualifiers(box): if not box: return set() obox = box if box in qual_cache: return qual_cache[box] if not box_exists(box): box += "_0" if not box_exists(box): return set() d = os.listdir("/sys/devices/uncore_%s/format" % (box)) q = set() for j in d: if j not in ["event", "umask"]: q.add(j) qual_cache[obox] = q return q def expand_acronyms(n): for j in cpu_aux.acronyms: n = n.replace(j, j + " (" + cpu_aux.acronyms[j] + ")") return n class EventsHeader: def __init__(self, name, f): self.printed = False self.name = name self.f = f def out(self): if not self.printed and not args.name_only: n = expand_acronyms(self.name) print(file=self.f) print(n, file=self.f) self.printed = True def cmp_cat(a, b): # XXX move interesting ones first al = a.lower() bl = b.lower() return (al > bl) - (bl < al) def get_pager(): if args.no_pager: return sys.stdout, None f = sys.stdout if f.isatty(): try: if sys.version_info.major == 3: sp = subprocess.Popen(["less", "-F"], stdin=subprocess.PIPE, universal_newlines=True) else: sp = subprocess.Popen(["less", "-F"], stdin=subprocess.PIPE) return sp.stdin, sp except OSError: f = sys.stdout return f, None def print_events(cat, desc, equation): f, proc = get_pager() ecount = 0 dcount = 0 if (args.unsupported or args.broken) and 
not args.name_only: print("\nNot all of these events have been tested and they may be broken", file=f) print("USE AT YOUR OWN RISK!", file=f) for c in sorted(uc.categories, key=lambda x: x.lower()): if cat and expand_acronyms(c).lower().find(cat.lower()) < 0: continue ehdr = EventsHeader(c, f) ecount += print_list(f, uc.events, c, desc, equation, ehdr) dcount += print_list(f, uc.derived, c, desc, equation, ehdr) if proc: f.close() proc.wait() def format_equation(ev, qual, quiet=False): e = ucexpr.parse(ev["Equation"], ev["Box"], quiet or args.quiet, "UserEq" in ev, qual) return e def format_umask(u): u = u[1:] # drop b u = u.replace("x", "0") return int(u, 2) box_dir_cache = [] def find_boxes(prefix): if prefix.startswith("uncore_"): prefix = prefix[6:] global box_dir_cache if len(box_dir_cache) == 0: box_dir_cache += [x for x in os.listdir("/sys/devices/") if x.startswith("uncore")] l = [x.replace("uncore_", "") for x in box_dir_cache if x.startswith("uncore_" + prefix)] return sorted(l) box_cache = dict() def box_exists(box): n = "/sys/devices/uncore_%s" % (box) if n not in box_cache: box_cache[n] = os.path.exists(n) return box_cache[n] def format_event(ev): if "Equation" in ev: return format_equation(ev, None) return format_reg_event(ev, dict()) # format an event's attributes def format_attrs(ev, box): evsel = ev["EvSel"] if "Umask" in ev and ev["Umask"] and box == "pcu": evsel |= format_umask(ev["Umask"]) if "ExtSel" in ev and ev["ExtSel"] != "": evsel |= (ev["ExtSel"] << 21) attrs = "event=%#x" % (evsel) if "Umask" in ev and ev["Umask"] and box != "pcu": attrs += ",umask=%#x" % (format_umask(ev["Umask"])) if box == "pcu": if "SubCtr" in ev and int(ev["SubCtr"]) > 0: attrs += ",occ_sel=%d" % (ev["SubCtr"]) return attrs box_to_perf = { "cbo": "cbox", "qpi_ll": "qpi", "upi_ll": "upi", "sbo": "sbox", } def format_box(ev): box = ev["Box"].lower() if box in box_to_perf: box = box_to_perf[box] return box # format a single event for perf def format_reg_event(ev, 
qual): box = format_box(ev) attrs = format_attrs(ev, box) if qual: attrs += "," + qual if not box_exists(box): ret = ["uncore_%s/%s/" % (j, attrs) for j in find_boxes(box)] if ret: return ret return ["uncore_%s/%s/" % (box, attrs)] HEADER_INTERVAL = 50 def is_pct(s): return s.find("_PCT") >= 0 or s.find("PCT_") >= 0 nonecount = 0 def get_box(s): m = re.match(r"([^.]+)\.(.*)", s) if m: return re.sub(r"[0-9]+$", "", m.group(1)), m.group(2) global nonecount nonecount += 1 return "%d" % (nonecount) def sum_event(a, b): if args.no_sum or a.find("iMC") >= 0 or a.find("QPI_LL") >= 0: return False return get_box(a) == get_box(b) def is_str(x): return isinstance(x, str) def scale_val(val): if args.scale and not is_str(val): val = val / units[args.scale] return val PCT_FIELDLEN = 7 OVER_THRESHOLD = 3 units = { "GB": 1024**3, "MB": 1024**2, "KB": 1024 } class Output: """Output data in human readable columns. This also merges boxes and implements adaptive column widths.""" def __init__(self, fieldlen=14, adaptive=False): self.headers = [] self.vals = [] self.num_output = 0 locale.setlocale(locale.LC_ALL, '') self.FIELDLEN = fieldlen self.over = 0 self.columns = dict() self.adaptive = adaptive self.timestamp = None def out(self, name, r, timestamp): if self.vals and sum_event(self.headers[-1], name): if is_str(self.vals[-1]) or is_str(r): self.vals[-1] = str(self.vals[-1]) + str(r) else: self.vals[-1] += r self.headers[-1] = self.headers[-1].replace("0.", ".", 1) else: self.headers.append(name) self.vals.append(r) self.timestamp = timestamp def fieldlen(self, hdr): if self.columns and hdr in self.columns: return self.columns[hdr] elif is_pct(hdr): return PCT_FIELDLEN else: return self.FIELDLEN def update_column(self, hdr, l): if not self.adaptive: return if hdr in self.columns: old = self.columns[hdr] if l > old: self.over += 1 if self.over >= OVER_THRESHOLD: self.columns[hdr] = l self.num_output = -1 # force redisplay else: self.columns[hdr] = l def format_field(self, j, h, 
fieldlen): if isinstance(j, float): if is_pct(h): j *= 100.0 fmt = "%.2f" j = scale_val(j) elif is_str(j): fmt = "%s" else: fmt = "%d" j = scale_val(j) num = locale.format_string(fmt, j, grouping=True) if len(num) >= fieldlen: num += " " return num def print_header(self): pre = "" for v, j in zip(self.vals, self.headers): l = self.fieldlen(j) l = max(len(self.format_field(v, j, l)), l) print(pre + j, file=args.output) pre += "|" + " "*(l - 1) self.columns[j] = l def flush(self): if (self.num_output % HEADER_INTERVAL) == 0: self.print_header() self.over = 0 out = "" for j, h in zip(self.vals, self.headers): fieldlen = self.fieldlen(h) num = self.format_field(j, h, fieldlen) out += "%-*s" % (fieldlen, num) self.update_column(h, len(num)) print(out, file=args.output) self.vals = [] self.headers = [] self.num_output += 1 class OutputCSV(Output): """CSV version of Output.""" def __init__(self, csv): Output.__init__(self) self.csv = csv def flush(self): if self.num_output == 0: print(self.csv.join(["timestamp"] + self.headers), file=args.output) scaled_vals = map(scale_val, [self.timestamp] + self.vals) print(self.csv.join(map(str, scaled_vals)), file=args.output) self.vals = [] self.headers = [] self.num_output += 1 groupings = ('[', ']', '{', '}', '[[', ']]') out = None class PerfRun: """Control a perf process""" def __init__(self): self.perf = None # for testing purposes def mock(self, logfile, evl): f = open(logfile, "w") for t in range(0, 5): num = 10000 + t for i in evl: if i in groupings: continue i = i.replace("{","").replace("}","") o = "%s,%s" % (num, i) to = "%d," % (t) print(to + "S0,1,"+o, file=f) print(to + "S1,1,"+o, file=f) num += 10000 f.close() def execute(self, s, logfile, evl): if not args.quiet: l = list(map(lambda x: "'" + x + "'" if x.find("{") >= 0 else x, s)) i = l.index('--log-fd') del l[i:i+2] print(" ".join(l), file=args.output) if args.mock: self.mock(logfile, evl) self.perf = None else: if sys.version_info.major == 3 and 
sys.version_info.minor >= 2: # close_fds mysteriously doesn't work anymore with python 3.7 self.perf = subprocess.Popen(s, pass_fds=(int(s[s.index('--log-fd')+1]),)) else: self.perf = subprocess.Popen(s, close_fds=False) def perf_box(x): m = re.match(r"uncore_([^/]+)(_\d+)?/", x) if not m: return None return m.group(1) def available_counters(box): if box in cpu_aux.limited_counters: return cpu_aux.limited_counters[box] return cpu_aux.DEFAULT_COUNTERS def count_filter(ev): return sum(map(lambda x: ev.count("," + x), cpu_aux.filters)) def is_fixed(x): return x.find("/clockticks/") >= 0 # determine if equation can run in a group def supports_group(evl, nl): evl = filter(lambda x: not is_fixed(x), evl) boxes = defaultdict(list) bnames = defaultdict(list) for j, n in zip(evl, nl): box = perf_box(j) if not box: continue boxes[box].append(j) bnames[box].append(n) for box in boxes: # some events have too complicated counter constraints for # this pear brain scheduler to decide if groups work or not. Just do # not do groups for them. 
for n in bnames[box]: ev = lookup_event(n) if complicated_counters(ev): return False evl = boxes[box] filters = sum(map(count_filter, evl)) if len(evl) > available_counters(box) or filters > 1: return False return True def count_box(box): return len(find_boxes(box)) # run a equation def evaluate(eq, EV): SAMPLE_INTERVAL = float(args.interval)*1000000 # noqa F841 ROUND = lambda x: round(x) # noqa F841 KB = 1024 # noqa F841 MB = 1024*KB # noqa F841 GB = 1024*MB # noqa F841 KILO = 1000 # noqa F841 MEGA = 1000*KILO # noqa F841 GIGA = 1000*MEGA # noqa F841 NUM_R3QPI = count_box("r3qpi") # noqa F841 XXX add generic function dbg("evaluate", eq) try: return eval(eq) except SyntaxError: return "#EVAL" except NameError: return "#EVAL" except ZeroDivisionError: return 0.0 def is_error(x): return is_str(x) and x.startswith("#") # read perf output and output results def gen_res(evl, res, evp, equations, evnames, timestamp): dbg("evl", evl) dbg("res", res) dbg("evp", evp) dbg("equations", equations) cur_eq = None eql = equations for j in evl: if j == '[' or j == '[[': cur_eq = eql[0] eq_events = dict() elif j == ']' or j == ']]': r = None for x in eq_events: if is_error(eq_events[x]): r = eq_events[x] break if r is None: if '/' in equations[0]: EV = lambda x, n: float(eq_events[x]) else: EV = lambda x, n: int(eq_events[x]) r = evaluate(equations[0], EV) dbg("result", r) out.out(evnames[0], r, timestamp) equations = equations[1:] cur_eq = None evnames = evnames[1:] elif cur_eq: assert evp[0] == j eq_events[evp[0]] = res[0] res = res[1:] evp = evp[1:] elif j in ('{', '}'): continue else: assert evp[0] == j if re.match(r"[0-9]+", res[0]): r = int(res[0]) else: r = res[0] out.out(evnames[0], r, timestamp) evnames = evnames[1:] res = res[1:] evp = evp[1:] out.flush() assert len(res) == 0 assert len(evp) == 0 # replace internal [] equation groups with perf format def gen_events(evl): e = "" prev = "" for j in evl: if j == '[' or j == ']': continue if j == '[[': j = '{' elif j == ']]': 
j = '}' sep = "" if prev: match = [prev in groupings, j in groupings] if match == [True, True] or match == [False, False]: sep = "," if prev in ['[', '{'] and match[1] is False: sep = "" if prev in [']', '}']: sep = "," if match[0] is False and j in ['[', '{']: sep = "," e += sep + j prev = j return e def concat(d): x = [] for j in sorted(d.keys()): x += d[j] return x def gennames(names, sockets): x = [] for s in sorted(sockets): if s != "": s += "-" for n in names: x.append(s + n) return x def check_per_socket(s, warned): if (not warned and not args.mock and sorted([int(x[1:]) for x in s]) != list(range(0, len(s)))): ucmsg.warning("perf --per-socket appears broken. Please update perf.") ucmsg.warning("Data on socket > 0 will be likely incorrect.") return True return warned perf_errors = { "<not supported>": "#NS", "<not counted>": "#NC", } # run perf and output results def measure(evl, argl, equations, evnames): warned = False all_events = gen_events(evl) # use a pty because perf doesn't do enough fflush() otherwise outp, inp = pty.openpty() logfile = "ulog.%d" % (os.getpid()) run = PerfRun() run.execute([perf, "stat", "--log-fd", "%d" % (inp), "-e", all_events] + argl, logfile, evl) prev_timestamp = None evp = defaultdict(list) res = defaultdict(list) socket = "" try: if args.mock: f = open(logfile, 'r') else: f = os.fdopen(outp, 'r') os.close(inp) while True: try: # force line-by-line buffering l = f.readline() if not l: break except (KeyboardInterrupt, IOError): break l = l.strip() dbg("perf", l) if l.startswith('#') or l == "": continue if l.startswith("S"): # bogus summary lines. ignore continue if per_socket: ts, socket, _, rest = l.split(",", 3) l = ts + "," + rest # uncore// contains commas! 
m = re.match(r"([0-9.]+),([0-9]+|<.*>),?,(.*)$", l) if not m: print("PERF-UNREADABLE", l, end=" ") continue timestamp = m.group(1) if timestamp != prev_timestamp: if per_socket and not args.quiet: warned = check_per_socket(res.keys(), warned) if evp: num = len(res) gen_res(evl*num, concat(res), concat(evp), equations*num, gennames(evnames, res.keys()), timestamp) res = defaultdict(list) evp = defaultdict(list) prev_timestamp = timestamp r = m.group(2) if r.startswith("<"): if r in perf_errors: r = perf_errors[r] else: r = "#NA" res[socket].append(r) p = m.group(3) if re.search(r"/u?,", p): p = re.sub(r"/u?,.*", "", p) + "/" if p.startswith(","): p = p[1:] evp[socket].append(p) f.close() if args.mock: os.remove(logfile) except IOError: # ptys always seem to end with EIO #print("Error talking to perf", e) pass if evp: num = len(res) gen_res(evl*num, concat(res), concat(evp), equations*num, gennames(evnames, res.keys()), timestamp) if run.perf: run.perf.wait() dummy_count = 1000 def ev_append(ovl, x, nl, n): if x not in ovl: ovl.append(x) if n not in nl: nl.append(n) global dummy_count dummy_count += 1 return dummy_count # dummy value to avoid division by zero class WarnOnce: def __init__(self): self.warned = False def warn(self, msg): if not self.warned: ucmsg.warning(msg) self.warned = True def add_group(x, nl, in_group, mw): if len(x) == 0: return x if len(x) > 1 and not in_group: if supports_group(x, nl): return ['[['] + x + [']]'] mw.warn("Equation will multiplex and may produce inaccurate results (see manual)") return ['['] + x + [']'] # convert a equation to perf form def convert_equation(ev, qual, in_group, quiet=False): mw = WarnOnce() nnl = [] evl = [] equations = [] eql = format_equation(ev, qual, quiet) for p in eql: ovl = [] nl = [] # run equation to collect events r = evaluate(p, lambda x, n: ev_append(ovl, x, nl, n)) if is_error(r) and not args.quiet: print("Cannot evaluate equation", ev["Equation"], file=sys.stderr) nnl.append(nl) evl.append(ovl) 
equations.append(p) flat_eql = [] for j, nl in zip(evl, nnl): flat_eql += add_group(j, nl, in_group, mw) return flat_eql, equations standard_events = ("cycles", "ref-cycles", "instructions") # convert a single event to perf form def convert_one(evn, evl, evnames, equations, qual, in_group): ev = lookup_event(evn) if not ev: print("unknown event", evn, file=sys.stderr) sys.exit(1) if "Equation" in ev: nvl, neql = convert_equation(ev, qual, in_group) equations += neql num = len(neql) else: nvl = format_reg_event(ev, qual) # should run this in a group XXX num = len(nvl) evl += nvl # add index numbers to names if num > 1: evnames += map(lambda x, y: x.replace(".", "%d." % (y),1), [evn] * num, range(0, num)) else: evnames.append(evn) return evl, evnames, equations # equation on command line def user_equation(evn, in_group): # XXX no qualifiers for now, as the the , conflicts with "with:" ev = dict() ev["Equation"] = evn ev["Box"] = "" ev["UserEq"] = True return convert_equation(ev, dict(), in_group) # convert command line events to perf form def convert_events(arg_events): print_events = [] equations = [] evnames = [] evl = [] j = 0 in_group = 0 for evn in arg_events: if evn == "--": j += 1 break if evn in standard_events: evnames.append(evn) evl.append(evn) j += 1 continue if evn in ['{', '}']: if evn == '{': in_group += 1 else: in_group -= 1 evl += evn j += 1 continue if evn.count(".") == 0: break print_events.append(evn) # not checking for * here, as it conflicts with wildcards if re.search(r"[ /()+-]", evn): nvl, neql = user_equation(evn, in_group) equations += neql evl += nvl evnames += [evn] * len(nvl) j += 1 continue qual = "" n = evn.split(",", 1) if len(n) > 1: evn = n[0] qual = n[1] if re.search(r"[\[*?]", evn): for me in sorted(list(uc.events.keys()) + list(uc.derived.keys())): if fnmatch.fnmatch(me, evn): evl, evnames, equations = convert_one(me, evl, evnames, equations, qual, in_group) else: evl, evnames, equations = convert_one(evn, evl, evnames, 
equations, qual, in_group) j += 1 if not args.quiet: print("Events:", " ".join(print_events), file=args.output) return evl, equations, evnames, args.events[j:] def fix_field(nev, key, old, num): nev[key] = nev[key].replace(old, old.replace("x", str(num))) # expand a single event def expand_ev(table, name, num): ev = table[name] for n in range(0, num): nev = copy.deepcopy(ev) fix_field(nev, "Equation", ",x", n) fix_field(nev, "Equation", "=x", n) fix_field(nev, "Equation", "RANKx", n) fix_field(nev, "Equation", "NODEx", n) fix_field(nev, "Desc", "Rank x", n) fix_field(nev, "Desc", "Node x", n) fix_field(nev, "Defn", "(x)", n) table[name.replace("x", str(n))] = nev del table[name] def maybe_expand_ev(table, name, max_node): if name.find("NODEx") >= 0: expand_ev(table, name, max_node) elif name.find("RANKx") >= 0: expand_ev(table, name, cpu_aux.MAX_RANK) # XXX should do this at generation time def expand_events(): max_node = cpu.max_node() + 1 for j in list(uc.derived.keys()): maybe_expand_ev(uc.derived, j, max_node) for j in list(uc.events.keys()): maybe_expand_ev(uc.events, j, max_node) for j in extra.extra_derived: uc.derived[j] = extra.extra_derived[j] def get_counter(c): m = re.match(r"(\d+)-(\d+)", c) if m: start = int(m.group(1)) return start, int(m.group(2)) return int(c), int(c) # index=0 start of ranges, index=1 end of ranges def counter_list(counters, index): return map(lambda x: get_counter(x)[index], str(counters).split(",")) # check for complicated counters our simple scheduler cannot handle def complicated_counters(ev): counters = ev["Counters"] return (max(counter_list(counters, 1)) < available_counters(format_box(ev)) - 1 or max(counter_list(counters, 0)) > 0) def check_events(): ae = dict() for j in sorted(uc.events.keys()): ev = uc.events[j] box = j[:j.index(".")] if "EvSel" not in ev: print(j,"has no evsel") umask = "" extsel = 0 if "Umask" in ev: umask = ev['Umask'] if "ExtSel" in ev: extsel = ev['ExtSel'] if extsel == "": extsel = 0 key = box, 
ev['EvSel'], extsel, umask if key in ae: print(ae[key],"duplicated with",j,key) else: ae[key] = j if complicated_counters(ev): print("event %s has complicated counters: %s" % (j, ev["Counters"])) def check_multiplex(): if args.quiet: return found = False for j in glob.iglob("/sys/devices/uncore_*/perf_event_mux_interval_ms"): found = True break if not found: ucmsg.warning("No hrtimer multiplexing support in kernel.") ucmsg.warning("Multiplexed events will be incorrect when not fully busy.") def event_dummy(e, n): global dummy_count dummy_count += 1 return dummy_count def parse_all(): errors = 0 empty = 0 for name in uc.derived.keys(): print("---",name,": ") el = format_event(uc.derived[name]) for e in el: print(e) r = evaluate(e, event_dummy) print("result:", r, cputype, name) if is_error(r): errors += 1 if not el: print("empty list") empty += 1 print("%d errors, %d empties" % (errors, empty)) perf = os.getenv("PERF") if not perf: perf = "perf" expand_events() if __name__ == '__main__': p = argparse.ArgumentParser(description=''' Intel Xeon uncore performance counter events frontend for perf. The uncore is the part of the CPU that is not core. This tool allows to monitor a variety of metrics in the uncore, including memory, QPI, PCI-E bandwidth, cache hit rates, power management statistics and various others.''') p.add_argument('--cat', help='Only print events from categories containing this in list') p.add_argument('--unsupported', action='store_true', help=''' Print all events, including unsupported and potentially broken ones. 
Use at your own risk.''') p.add_argument('--broken', action='store_true', help=argparse.SUPPRESS) p.add_argument('--desc', help='Print detailed descriptions in list', action='store_true') p.add_argument('--name-only', help='Only print event name in list', action='store_true') p.add_argument('--equation', help='Print equations for derived events in list', action='store_true') p.add_argument('--attr', help='Print attributes', action='store_true') p.add_argument('--parse-all', help=argparse.SUPPRESS, action='store_true') p.add_argument('--mock', action='store_true', help=argparse.SUPPRESS) p.add_argument('--scale', help='Scale numbers to unit (GB,MB,KB)', choices=units) p.add_argument('--csv', '-x', help='Enable CSV mode and use argument to separate fields') p.add_argument('events', nargs='*', help=""" List of events to be converted. May have a comma separate list of name=val qualifiers after comma for each event (see --desc output for valid qualifiers). Can use { and } (each as own argument and quoted) to define groups. Valid to use shell-style wildcards (quoted) to match multiple events. It is also valid to specify a equation (quoted, containing space). After -- a perf argument line can be specified (e.g. sleep NUM or a workload to run). 
Please note that ucevent measurements are always global to the specified socket(s) unlike normal perf stat.""") p.add_argument('--interval', '-I', default=1000, type=int, help='Measurement interval in ms') p.add_argument('--socket', '-S', help='Measure only socket (default all)', type=int) p.add_argument('--cpu', '-C', help=''' Measure socket associated with CPU, or use that CPU for the (very few) events that use core events''', type=int) p.add_argument('--fieldlen', default=6, help='Set output field length', type=int) p.add_argument('--verbose', '-v', help='More verbose output', action='store_true', default=False) p.add_argument('--quiet', help=argparse.SUPPRESS, default=True) p.add_argument('--no-sum', help='Don\'t sum up multiple instances of units', action='store_true') p.add_argument('--check-events', help=argparse.SUPPRESS, action='store_true') p.add_argument('--output', '-o', help='Set output file', default=sys.stdout, type=argparse.FileType('w')) p.add_argument('--resolve', action='store_true', help='Only print resolved event names. 
Do not run perf.') p.add_argument("--no-pager", action='store_true', help='Do not use a pager') p.add_argument('--debug', help=argparse.SUPPRESS) args = p.parse_args() if args.verbose: args.quiet = False ucmsg.quiet = args.quiet ucmsg.debug = args.debug if args.check_events: check_events() sys.exit(0) if args.csv: out = OutputCSV(args.csv) else: out = Output(args.fieldlen, True) if args.parse_all: parse_all() elif not args.events: print_events(args.cat, args.desc, args.equation) else: if not args.mock: check_multiplex() argl = ['-I%s' % (args.interval), '-x,'] if args.cpu is not None: argl.append('-C%d' % (args.cpu)) elif args.socket is not None: argl.append('-C%d' % (cpu.socket_to_cpu(args.socket))) else: argl.append('-a') argl.append('--per-socket') per_socket = True evl, equations, evnames, rest = convert_events(args.events) if len(rest) == 0: rest = ["sleep", "999999"] if len(evl) == 0: sys.exit("no events to measure") if args.resolve: for ev, evname in zip(evl, evnames): print(evname,"\t",ev) sys.exit(0) try: measure(evl, argl + rest, equations, evnames) except OSError as e: sys.exit("perf failed to run: " + e)
35,405
Python
.py
1,032
25.651163
101
0.539862
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,950
ucmsg.py
andikleen_pmu-tools/ucevent/ucmsg.py
# Handle warnings and errors # Separate module to avoid circular imports from __future__ import print_function import sys import fnmatch quiet = False debug = None def debug_msg(x, y): if debug and any(map(lambda p: fnmatch.fnmatch(x, p), debug.split(","))): print("debug:", x + ": " + str(y), file=sys.stderr) def warning(x): if not quiet: print("WARNING:", x, file=sys.stderr)
406
Python
.py
13
28.153846
77
0.682051
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,951
bdxde_uc.py
andikleen_pmu-tools/ucevent/bdxde_uc.py
# BDXDE bdxde_uc_events.v1.00p bdxde_uc_derived.v1.00p # aliases aliases = { "PCUFilter": "PCU_MSR_PMON_BOX_FILTER", "HA_AddrMatch0": "HAn_PCI_PMON_BOX_ADDRMATCH0", "IRPFilter": "IRP_PCI_PMON_BOX_FILTER", "HA_AddrMatch1": "HAn_PCI_PMON_BOX_ADDRMATCH1", "HA_OpcodeMatch": "HAn_PCI_PMON_BOX_OPCODEMATCH", "CBoFilter0": "Cn_MSR_PMON_BOX_FILTER", "UBoxFilter": "U_MSR_PMON_BOX_FILTER", "CBoFilter1": "Cn_MSR_PMON_BOX_FILTER1", } events = { # UBOX: "UBOX.EVENT_MSG": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", }, "UBOX.EVENT_MSG.DOORBELL_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UBOX.PHOLD_CYCLES": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles. Filter from source CoreID.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", }, "UBOX.PHOLD_CYCLES.ASSERT_TO_ACK": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles. Filter from source CoreID.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.RACU_REQUESTS": { "Box": "UBOX", "Category": "UBOX RACU Events", "Counters": "0-1", "Defn": "Number outstanding register requests within message channel tracker", "Desc": "RACU Request", "EvSel": 70, "ExtSel": "", }, # R2PCIe: "R2PCIe.CLOCKTICKS": { "Box": "R2PCIe", "Category": "R2PCIe UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. 
However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Number of uclks in domain", "EvSel": 1, "ExtSel": "", }, "R2PCIe.IIO_CREDIT": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, }, "R2PCIe.IIO_CREDIT.ISOCH_QPI0": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "R2PCIe.IIO_CREDIT.ISOCH_QPI1": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxx1xxx", }, "R2PCIe.IIO_CREDIT.PRQ_QPI0": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "R2PCIe.IIO_CREDIT.PRQ_QPI1": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AD_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_AD_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_AD_USED.ALL": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "R2PCIe.RING_AD_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AD_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_AD_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RING_AD_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AD_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_AK_BOUNCES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", }, "R2PCIe.RING_AK_BOUNCES.DN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AK_BOUNCES.UP": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AK_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_AK_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AK_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_AK_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_AK_USED.ALL": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "R2PCIe.RING_AK_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AK_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_AK_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RING_BL_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_BL_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_BL_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_BL_USED.ALL": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "R2PCIe.RING_BL_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_BL_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_BL_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_BL_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RING_IV_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", }, "R2PCIe.RING_IV_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_IV_USED.ANY": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00001111", }, "R2PCIe.RING_IV_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RxR_CYCLES_NE": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", }, "R2PCIe.RxR_CYCLES_NE.NCB": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.RxR_CYCLES_NE.NCS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.RxR_INSERTS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. 
This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", }, "R2PCIe.RxR_INSERTS.NCS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.RxR_INSERTS.NCB": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.RxR_OCCUPANCY": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "R2PCIe.RxR_OCCUPANCY.DRS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, "Umask": "b00001000", }, "R2PCIe.SBO0_CREDITS_ACQUIRED": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, }, "R2PCIe.SBO0_CREDITS_ACQUIRED.AD": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "R2PCIe.SBO0_CREDITS_ACQUIRED.BL": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "R2PCIe.STALL_NO_SBO_CREDIT": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. 
Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO0_AD": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO0_BL": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO1_AD": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxx1x", }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO1_BL": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. 
Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxx1xxx", }, "R2PCIe.TxR_CYCLES_FULL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", }, "R2PCIe.TxR_CYCLES_FULL.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_CYCLES_FULL.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_CYCLES_FULL.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_CYCLES_NE": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", }, "R2PCIe.TxR_CYCLES_NE.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. 
This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_CYCLES_NE.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_CYCLES_NE.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_NACK_CW": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", }, "R2PCIe.TxR_NACK_CW.UP_BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.TxR_NACK_CW.DN_AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_NACK_CW.UP_AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.TxR_NACK_CW.UP_AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R2PCIe.TxR_NACK_CW.DN_AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_NACK_CW.DN_BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxx1x", }, # CBO: "CBO.BOUNCE_CONTROL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Bounce Control", "EvSel": 10, "ExtSel": "", }, "CBO.CLOCKTICKS": { "Box": "CBO", "Category": "CBO UCLK Events", "Counters": "0-3", "Desc": "Uncore Clocks", "EvSel": 0, "ExtSel": "", }, "CBO.COUNTER0_OCCUPANCY": { "Box": "CBO", "Category": "CBO OCCUPANCY Events", "Counters": "0-3", "Defn": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. 
The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.", "Desc": "Counter 0 Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.FAST_ASSERTED": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles either the local distress or incoming distress signals are asserted. Incoming distress includes both up and dn.", "Desc": "FaST wire asserted", "EvSel": 9, "ExtSel": "", }, "CBO.LLC_LOOKUP": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", }, "CBO.LLC_LOOKUP.ANY": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. 
This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00010001", }, "CBO.LLC_LOOKUP.NID": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. 
To monitor any lookup, set the field to 0x1F.", "Umask": "b01000001", }, "CBO.LLC_LOOKUP.READ": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00100001", }, "CBO.LLC_LOOKUP.DATA_READ": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. 
The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00000011", }, "CBO.LLC_LOOKUP.WRITE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00000101", }, "CBO.LLC_LOOKUP.REMOTE_SNOOP": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. 
CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00001001", }, "CBO.LLC_VICTIMS": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", }, "CBO.LLC_VICTIMS.NID": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.LLC_VICTIMS.I_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.LLC_VICTIMS.M_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.LLC_VICTIMS.MISS": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.LLC_VICTIMS.F_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.LLC_VICTIMS.E_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.MISC": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", }, "CBO.MISC.WC_ALIASING": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.MISC.RSPI_WAS_FSE": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.MISC.RFO_HIT_S": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.MISC.STARTED": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.MISC.CVZERO_PREFETCH_MISS": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.MISC.CVZERO_PREFETCH_VICTIM": { "Box": "CBO", 
"Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RING_AD_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "CBO.RING_AD_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "CBO.RING_AD_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "CBO.RING_AD_USED.ALL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_AD_USED.CCW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_AD_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "CBO.RING_AD_USED.CW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_AD_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "CBO.RING_AK_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "CBO.RING_AK_USED.CCW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_AK_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "CBO.RING_AK_USED.CW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_AK_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "CBO.RING_AK_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "CBO.RING_AK_USED.ALL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_AK_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "CBO.RING_BL_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "CBO.RING_BL_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "CBO.RING_BL_USED.ALL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_BL_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "CBO.RING_BL_USED.CW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_BL_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "CBO.RING_BL_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "CBO.RING_BL_USED.CCW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_BOUNCES": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", }, "CBO.RING_BOUNCES.IV": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RING_BOUNCES.BL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RING_BOUNCES.AK": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RING_BOUNCES.AD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RING_IV_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", }, "CBO.RING_IV_USED.DOWN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b11001100", }, "CBO.RING_IV_USED.UP": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_IV_USED.ANY": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_IV_USED.DN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_SRC_THRTL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic.", "EvSel": 7, "ExtSel": "", }, "CBO.RxR_EXT_STARVED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. 
This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", }, "CBO.RxR_EXT_STARVED.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_EXT_STARVED.ISMQ_BIDS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_EXT_STARVED.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_EXT_STARVED.PRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. 
This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_INSERTS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", }, "CBO.RxR_INSERTS.PRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxx1xxxxx", }, "CBO.RxR_INSERTS.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxx1xx", }, "CBO.RxR_INSERTS.PRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxx1xxxx", }, "CBO.RxR_INSERTS.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxxx1", }, "CBO.RxR_INSERTS.IRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", 
"EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxx1x", }, "CBO.RxR_IPQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", }, "CBO.RxR_IPQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_IPQ_RETRY.ADDR_CONFLICT": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_IPQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_IPQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IPQ_RETRY2": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. 
Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 40, "ExtSel": "", }, "CBO.RxR_IPQ_RETRY2.TARGET": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 40, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_IPQ_RETRY2.AD_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", }, "CBO.RxR_IRQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_IRQ_RETRY.IIO_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.RxR_IRQ_RETRY.ADDR_CONFLICT": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_IRQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_IRQ_RETRY.RTID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_IRQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", 
"Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY.NID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_IRQ_RETRY2": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", }, "CBO.RxR_IRQ_RETRY2.AD_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY2.BL_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_IRQ_RETRY2.TARGET": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_ISMQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", }, "CBO.RxR_ISMQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_ISMQ_RETRY.IIO_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.RxR_ISMQ_RETRY.RTID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_ISMQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_ISMQ_RETRY.NID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_ISMQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_ISMQ_RETRY.WB_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CBO.RxR_ISMQ_RETRY2": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", }, "CBO.RxR_ISMQ_RETRY2.TARGET": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_ISMQ_RETRY2.AD_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_ISMQ_RETRY2.BL_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_OCCUPANCY": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": 
"Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, }, "CBO.RxR_OCCUPANCY.IRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000010", }, "CBO.RxR_OCCUPANCY.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000001", }, "CBO.RxR_OCCUPANCY.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000100", }, "CBO.RxR_OCCUPANCY.PRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00100000", }, "CBO.SBO_CREDITS_ACQUIRED": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo credits acquired in a given cycle, per ring. 
Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Acquired", "EvSel": 61, "ExtSel": "", }, "CBO.SBO_CREDITS_ACQUIRED.AD": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Acquired", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.SBO_CREDITS_ACQUIRED.BL": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Acquired", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.SBO_CREDIT_OCCUPANCY": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": 0, "Defn": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Occupancy", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 7, "Notes": "Each Cbo has 3 AD and 2 BL credits into its assigned Sbo.", "SubCtr": 1, }, "CBO.SBO_CREDIT_OCCUPANCY.BL": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": 0, "Defn": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Occupancy", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 7, "Notes": "Each Cbo has 3 AD and 2 BL credits into its assigned Sbo.", "SubCtr": 1, "Umask": "bxxxxxx1x", }, "CBO.SBO_CREDIT_OCCUPANCY.AD": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": 0, "Defn": "Number of Sbo credits in use in a given cycle, per ring. 
Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Occupancy", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 7, "Notes": "Each Cbo has 3 AD and 2 BL credits into its assigned Sbo.", "SubCtr": 1, "Umask": "bxxxxxxx1", }, "CBO.TOR_INSERTS": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", }, "CBO.TOR_INSERTS.OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000001", }, "CBO.TOR_INSERTS.ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00001000", }, "CBO.TOR_INSERTS.WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00010000", }, "CBO.TOR_INSERTS.NID_EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000100", }, "CBO.TOR_INSERTS.REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10000001", }, "CBO.TOR_INSERTS.NID_MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01001010", }, "CBO.TOR_INSERTS.EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000100", }, "CBO.TOR_INSERTS.MISS_REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10000011", }, "CBO.TOR_INSERTS.LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00101000", }, "CBO.TOR_INSERTS.LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00100001", }, "CBO.TOR_INSERTS.MISS_LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00100011", }, "CBO.TOR_INSERTS.REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10001000", }, "CBO.TOR_INSERTS.MISS_LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00101010", }, "CBO.TOR_INSERTS.NID_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000001", }, "CBO.TOR_INSERTS.MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000011", }, "CBO.TOR_INSERTS.NID_MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000011", }, "CBO.TOR_INSERTS.MISS_REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10001010", }, "CBO.TOR_INSERTS.NID_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01001000", }, "CBO.TOR_INSERTS.NID_WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01010000", }, "CBO.TOR_OCCUPANCY": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.TOR_OCCUPANCY.MISS_LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00100011", }, "CBO.TOR_OCCUPANCY.LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00100001", }, "CBO.TOR_OCCUPANCY.MISS_LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00101010", }, "CBO.TOR_OCCUPANCY.REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10001000", }, "CBO.TOR_OCCUPANCY.NID_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000001", }, "CBO.TOR_OCCUPANCY.NID_MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000011", }, "CBO.TOR_OCCUPANCY.MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000011", }, "CBO.TOR_OCCUPANCY.NID_WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01010000", }, "CBO.TOR_OCCUPANCY.MISS_REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10001010", }, "CBO.TOR_OCCUPANCY.NID_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01001000", }, "CBO.TOR_OCCUPANCY.OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000001", }, "CBO.TOR_OCCUPANCY.NID_EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000100", }, "CBO.TOR_OCCUPANCY.WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00010000", }, "CBO.TOR_OCCUPANCY.REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10000001", }, "CBO.TOR_OCCUPANCY.MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00001010", }, "CBO.TOR_OCCUPANCY.ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00001000", }, "CBO.TOR_OCCUPANCY.NID_MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01001010", }, "CBO.TOR_OCCUPANCY.MISS_REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10000011", }, "CBO.TOR_OCCUPANCY.LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00101000", }, "CBO.TOR_OCCUPANCY.EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000100", }, "CBO.TxR_ADS_USED": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", }, "CBO.TxR_ADS_USED.AD": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.TxR_ADS_USED.AK": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.TxR_ADS_USED.BL": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.TxR_INSERTS": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", }, "CBO.TxR_INSERTS.AD_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.TxR_INSERTS.IV_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.TxR_INSERTS.BL_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.TxR_INSERTS.BL_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.TxR_INSERTS.AK_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.TxR_INSERTS.AK_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.TxR_INSERTS.AD_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, # HA: "HA.ADDR_OPC_MATCH": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", }, "HA.ADDR_OPC_MATCH.AD": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.ADDR_OPC_MATCH.FILT": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "b00000011", }, "HA.ADDR_OPC_MATCH.AK": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.ADDR_OPC_MATCH.BL": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.ADDR_OPC_MATCH.ADDR": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.ADDR_OPC_MATCH.OPC": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.BT_CYCLES_NE": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Cycles the Backup Tracker (BT) is not empty. 
The BT is the actual HOM tracker in IVT.", "Desc": "BT Cycles Not Empty", "EvSel": 66, "ExtSel": "", "Notes": "Will not count case HT is empty and a Bypass happens.", }, "HA.BT_OCCUPANCY": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "BT Occupancy", "EvSel": 67, "ExtSel": "", "MaxIncCyc": 512, }, "HA.BYPASS_IMC": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", }, "HA.BYPASS_IMC.TAKEN": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", "Umask": "bxxxxxxx1", }, "HA.BYPASS_IMC.NOT_TAKEN": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. 
This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", "Umask": "bxxxxxx1x", }, "HA.CLOCKTICKS": { "Box": "HA", "Category": "HA UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.", "Desc": "uclks", "EvSel": 0, "ExtSel": "", }, "HA.CONFLICT_CYCLES": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": 1, "Defn": "Counters the number of cycles there was a conflict in the HA because threads in two different sockets were requesting the same address at the same time", "Desc": "Conflict Checks", "EvSel": 11, "Filter": "N", "ExtSel": "", }, "HA.DIRECT2CORE_COUNT": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of Direct2Core messages sent", "Desc": "Direct2Core Messages Sent", "EvSel": 17, "ExtSel": "", "Notes": "Will not be implemented since OUTBOUND_TX_BL:0x1 will count DRS to CORE which is effectively the same thing as D2C count", }, "HA.DIRECT2CORE_CYCLES_DISABLED": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of cycles in which Direct2Core was disabled", "Desc": "Cycles when Direct2Core was Disabled", "EvSel": 18, "ExtSel": "", }, "HA.DIRECT2CORE_TXN_OVERRIDE": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of Reads where Direct2Core overridden", "Desc": "Number of Reads that had Direct2Core Overridden", "EvSel": 19, "ExtSel": "", }, "HA.DIRECTORY_LAT_OPT": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Directory Latency Optimization Data Return Path Taken. 
When directory mode is enabled and the directory retuned for a read is Dir=I, then data can be returned using a faster path if certain conditions are met (credits, free pipeline, etc).", "Desc": "Directory Lat Opt Return", "EvSel": 65, "ExtSel": "", }, "HA.DIRECTORY_LOOKUP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "HA.DIRECTORY_LOOKUP.NO_SNP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "HA.DIRECTORY_LOOKUP.SNP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "HA.DIRECTORY_UPDATE": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "HA.DIRECTORY_UPDATE.SET": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. 
These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "HA.DIRECTORY_UPDATE.CLEAR": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "HA.DIRECTORY_UPDATE.ANY": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx11", }, "HA.HITME_HIT": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", }, "HA.HITME_HIT.RSPFWDI_LOCAL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.HITME_HIT.ALL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b11111111", }, "HA.HITME_HIT.READ_OR_INVITOE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.HITME_HIT.ACKCNFLTWBI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": 
"", "Umask": "bxxxxx1xx", }, "HA.HITME_HIT.WBMTOI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.HITME_HIT.HOM": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b00001111", }, "HA.HITME_HIT.INVALS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b00100110", }, "HA.HITME_HIT.RSPFWDI_REMOTE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.HITME_HIT.RSP": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.HITME_HIT.ALLOCS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b01110000", }, "HA.HITME_HIT.EVICTS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b01000010", }, "HA.HITME_HIT.RSPFWDS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.HITME_HIT.WBMTOE_OR_S": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.HITME_HIT_PV_BITS_SET": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", }, "HA.HITME_HIT_PV_BITS_SET.WBMTOI": { "Box": "HA", "Category": "HA HitME 
Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.HITME_HIT_PV_BITS_SET.HOM": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "b00001111", }, "HA.HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.HITME_HIT_PV_BITS_SET.ALL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "b11111111", }, "HA.HITME_HIT_PV_BITS_SET.READ_OR_INVITOE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.HITME_HIT_PV_BITS_SET.ACKCNFLTWBI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.HITME_HIT_PV_BITS_SET.RSP": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.HITME_HIT_PV_BITS_SET.WBMTOE_OR_S": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.HITME_HIT_PV_BITS_SET.RSPFWDS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE": { "Box": "HA", "Category": "HA HitME Events", 
"Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.HITME_LOOKUP": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", }, "HA.HITME_LOOKUP.RSPFWDI_LOCAL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.HITME_LOOKUP.ALL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b11111111", }, "HA.HITME_LOOKUP.READ_OR_INVITOE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.HITME_LOOKUP.ACKCNFLTWBI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.HITME_LOOKUP.WBMTOI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.HITME_LOOKUP.HOM": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b00001111", }, "HA.HITME_LOOKUP.INVALS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b00100110", }, "HA.HITME_LOOKUP.RSPFWDI_REMOTE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.HITME_LOOKUP.RSP": { "Box": "HA", "Category": 
"HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.HITME_LOOKUP.RSPFWDS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.HITME_LOOKUP.ALLOCS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b01110000", }, "HA.HITME_LOOKUP.WBMTOE_OR_S": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.IGR_NO_CREDIT_CYCLES": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI0": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI0": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. 
This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI2": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI1": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI2": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI1": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.IMC_READS": { "Box": "HA", "Category": "HA IMC_READS Events", "Counters": "0-3", "Defn": "Count of the number of reads issued to any of the memory controller channels. 
This can be filtered by the priority of the reads.", "Desc": "HA to iMC Normal Priority Reads Issued", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 4, "Notes": "Does not count reads using the bypass path. That is counted separately in HA_IMC.BYPASS", }, "HA.IMC_READS.NORMAL": { "Box": "HA", "Category": "HA IMC_READS Events", "Counters": "0-3", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Normal Priority Reads Issued", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 4, "Notes": "Does not count reads using the bypass path. That is counted separately in HA_IMC.BYPASS", "Umask": "b00000001", }, "HA.IMC_RETRY": { "Box": "HA", "Category": "HA IMC_MISC Events", "Counters": "0-3", "Desc": "Retry Events", "EvSel": 30, "ExtSel": "", }, "HA.IMC_WRITES": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", }, "HA.IMC_WRITES.FULL_ISOCH": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.IMC_WRITES.PARTIAL_ISOCH": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. 
It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.IMC_WRITES.ALL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "b00001111", }, "HA.IMC_WRITES.PARTIAL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.IMC_WRITES.FULL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.OSB": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", }, "HA.OSB.REMOTE_USEFUL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.OSB.READS_LOCAL_USEFUL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.OSB.CANCELLED": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.OSB.INVITOE_LOCAL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.OSB.READS_LOCAL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.OSB.REMOTE": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.OSB_EDR": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", }, "HA.OSB_EDR.READS_LOCAL_S": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.OSB_EDR.READS_REMOTE_S": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.OSB_EDR.READS_LOCAL_I": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.OSB_EDR.READS_REMOTE_I": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.OSB_EDR.ALL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts 
the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.REQUESTS": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", }, "HA.REQUESTS.READS_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.REQUESTS.WRITES_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.REQUESTS.READS": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "b00000011", }, "HA.REQUESTS.INVITOE_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.REQUESTS.WRITES_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.REQUESTS.WRITES": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "b00001100", }, "HA.REQUESTS.INVITOE_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.REQUESTS.READS_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.RING_AD_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_AD_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "HA.RING_AD_USED.CW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_AD_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "HA.RING_AD_USED.CCW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_AD_USED.CW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_AD_USED.CCW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_AK_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_AK_USED.CW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_AK_USED.CCW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_AK_USED.CCW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_AK_USED.ALL": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "HA.RING_AK_USED.CW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_AK_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "HA.RING_AK_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "HA.RING_BL_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_BL_USED.CW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_BL_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "HA.RING_BL_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "HA.RING_BL_USED.CW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_BL_USED.CCW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_BL_USED.CCW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_BL_USED.ALL": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "HA.RPQ_CYCLES_NO_REG_CREDITS": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN3": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00001000", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN0": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000001", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN2": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000100", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN1": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000010", }, "HA.SBO0_CREDITS_ACQUIRED": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 104, "ExtSel": "", }, "HA.SBO0_CREDITS_ACQUIRED.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 104, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SBO0_CREDITS_ACQUIRED.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 104, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO0_CREDIT_OCCUPANCY": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits in use in a given cycle, per ring.", "Desc": "SBo0 Credits Occupancy", "EvSel": 106, "ExtSel": "", }, "HA.SBO0_CREDIT_OCCUPANCY.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits in use in a given cycle, per ring.", "Desc": "SBo0 Credits Occupancy", "EvSel": 106, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SBO0_CREDIT_OCCUPANCY.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits in use in a given cycle, per ring.", "Desc": "SBo0 Credits Occupancy", "EvSel": 106, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO1_CREDITS_ACQUIRED": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 105, "ExtSel": "", }, "HA.SBO1_CREDITS_ACQUIRED.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": 
"0-3", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 105, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SBO1_CREDITS_ACQUIRED.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 105, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO1_CREDIT_OCCUPANCY": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits in use in a given cycle, per ring.", "Desc": "SBo1 Credits Occupancy", "EvSel": 107, "ExtSel": "", }, "HA.SBO1_CREDIT_OCCUPANCY.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits in use in a given cycle, per ring.", "Desc": "SBo1 Credits Occupancy", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SBO1_CREDIT_OCCUPANCY.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits in use in a given cycle, per ring.", "Desc": "SBo1 Credits Occupancy", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNOOPS_RSP_AFTER_DATA": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts the number of reads when the snoop was on the critical path to the data return.", "Desc": "Data beat the Snoop Responses", "EvSel": 10, "ExtSel": "", "MaxIncCyc": 127, }, "HA.SNOOPS_RSP_AFTER_DATA.LOCAL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts the number of reads when the snoop was on the critical path to the data return.", "Desc": "Data beat the Snoop Responses", "EvSel": 10, "ExtSel": "", "MaxIncCyc": 127, "Umask": "b00000001", }, "HA.SNOOPS_RSP_AFTER_DATA.REMOTE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts the number of reads when the snoop was on the critical path to the data return.", "Desc": "Data beat the Snoop 
Responses", "EvSel": 10, "ExtSel": "", "MaxIncCyc": 127, "Umask": "b00000010", }, "HA.SNOOP_CYCLES_NE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", }, "HA.SNOOP_CYCLES_NE.LOCAL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNOOP_CYCLES_NE.REMOTE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNOOP_CYCLES_NE.ALL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", "Umask": "b00000011", }, "HA.SNOOP_OCCUPANCY": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.", "Desc": "Tracker Snoops Outstanding Accumulator", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, }, "HA.SNOOP_OCCUPANCY.REMOTE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. 
This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.", "Desc": "Tracker Snoops Outstanding Accumulator", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000010", }, "HA.SNOOP_OCCUPANCY.LOCAL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.", "Desc": "Tracker Snoops Outstanding Accumulator", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000001", }, "HA.SNOOP_RESP": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", }, "HA.SNOOP_RESP.RSPS": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. 
Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNOOP_RESP.RSPIFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.SNOOP_RESP.RSPCNFLCT": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.SNOOP_RESP.RSPSFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. 
In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.SNOOP_RESP.RSP_WB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.SNOOP_RESP.RSP_FWD_WB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.SNOOP_RESP.RSPI": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNP_RESP_RECV_LOCAL": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", }, "HA.SNP_RESP_RECV_LOCAL.RSPxWB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPS": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNP_RESP_RECV_LOCAL.OTHER": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPI": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNP_RESP_RECV_LOCAL.RSPSFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPCNFLCT": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": 
"bx1xxxxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPxFWDxWB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPIFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.STALL_NO_SBO_CREDIT": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", }, "HA.STALL_NO_SBO_CREDIT.SBO1_BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.STALL_NO_SBO_CREDIT.SBO0_AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.STALL_NO_SBO_CREDIT.SBO1_AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.STALL_NO_SBO_CREDIT.SBO0_BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. 
Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.TAD_REQUESTS_G0": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, }, "HA.TAD_REQUESTS_G0.REGION6": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b01000000", }, "HA.TAD_REQUESTS_G0.REGION1": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. 
All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "HA.TAD_REQUESTS_G0.REGION4": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "HA.TAD_REQUESTS_G0.REGION7": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b10000000", }, "HA.TAD_REQUESTS_G0.REGION0": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "HA.TAD_REQUESTS_G0.REGION3": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "HA.TAD_REQUESTS_G0.REGION2": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "HA.TAD_REQUESTS_G0.REGION5": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00100000", }, "HA.TAD_REQUESTS_G1": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, }, "HA.TAD_REQUESTS_G1.REGION9": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "HA.TAD_REQUESTS_G1.REGION11": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. 
There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "HA.TAD_REQUESTS_G1.REGION10": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "HA.TAD_REQUESTS_G1.REGION8": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. 
This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "HA.TRACKER_CYCLES_FULL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Full", "EvSel": 2, "ExtSel": "", }, "HA.TRACKER_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. 
HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Full", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TRACKER_CYCLES_FULL.GP": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Full", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TRACKER_CYCLES_NE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. 
HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", }, "HA.TRACKER_CYCLES_NE.REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TRACKER_CYCLES_NE.LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. 
HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TRACKER_CYCLES_NE.ALL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", "Umask": "b00000011", }, "HA.TRACKER_OCCUPANCY": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. 
HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "HA.TRACKER_OCCUPANCY.READS_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxx1xx", }, "HA.TRACKER_OCCUPANCY.INVITOE_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bx1xxxxxx", }, "HA.TRACKER_OCCUPANCY.READS_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. 
This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxx1xxx", }, "HA.TRACKER_OCCUPANCY.WRITES_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxx1xxxxx", }, "HA.TRACKER_OCCUPANCY.INVITOE_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. 
HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b1xxxxxxx", }, "HA.TRACKER_OCCUPANCY.WRITES_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxx1xxxx", }, "HA.TRACKER_PENDING_OCCUPANCY": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.", "Desc": "Data Pending Occupancy Accumultor", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, }, "HA.TRACKER_PENDING_OCCUPANCY.REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. 
(1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.", "Desc": "Data Pending Occupancy Accumultor", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000010", }, "HA.TRACKER_PENDING_OCCUPANCY.LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.", "Desc": "Data Pending Occupancy Accumultor", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000001", }, "HA.TxR_AD_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", }, "HA.TxR_AD_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_AD_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_AD_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_AK": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Desc": "Outbound Ring Transactions on AK", "EvSel": 14, "ExtSel": "", }, "HA.TxR_AK_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", 
"Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", }, "HA.TxR_AK_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_AK_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_AK_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_BL": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", }, "HA.TxR_BL.DRS_CORE": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_BL.DRS_CACHE": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_BL.DRS_QPI": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. 
This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.TxR_BL_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", }, "HA.TxR_BL_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_BL_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_BL_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_BL_OCCUPANCY": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Occupancy", "Desc": "BL Egress Occupancy", "EvSel": 52, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "HA.TxR_STARVED": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.", "Desc": "Injection Starvation", "EvSel": 109, "ExtSel": "", }, "HA.TxR_STARVED.BL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.", "Desc": "Injection Starvation", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_STARVED.AK": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "Counts injection starvation. 
This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.", "Desc": "Injection Starvation", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.WPQ_CYCLES_NO_REG_CREDITS": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN3": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00001000", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN0": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000001", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN2": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000100", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN1": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000010", }, # iMC: "iMC.ACT_COUNT": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", }, "iMC.ACT_COUNT.BYP": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. 
One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.ACT_COUNT.WR": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.ACT_COUNT.RD": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.BYP_CMDS": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", }, "iMC.BYP_CMDS.CAS": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.BYP_CMDS.PRE": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.BYP_CMDS.ACT": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.CAS_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", }, "iMC.CAS_COUNT.RD_UNDERFILL": { "Box": "iMC", 
"Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.CAS_COUNT.RD_RMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.CAS_COUNT.RD_REG": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.CAS_COUNT.RD_WMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.CAS_COUNT.WR_RMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.CAS_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00000011", }, "iMC.CAS_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001100", }, "iMC.CAS_COUNT.ALL": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001111", }, "iMC.CAS_COUNT.WR_WMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, 
"iMC.DCLOCKTICKS": { "Box": "iMC", "Category": "iMC DCLK Events", "Counters": "0-3", "Desc": "DRAM Clockticks", "EvSel": 0, "ExtSel": "", }, "iMC.DRAM_PRE_ALL": { "Box": "iMC", "Category": "iMC DRAM_PRE_ALL Events", "Counters": "0-3", "Defn": "Counts the number of times that the precharge all command was sent.", "Desc": "DRAM Precharge All Commands", "EvSel": 6, "ExtSel": "", }, "iMC.DRAM_REFRESH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", }, "iMC.DRAM_REFRESH.PANIC": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.DRAM_REFRESH.HIGH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.ECC_CORRECTABLE_ERRORS": { "Box": "iMC", "Category": "iMC ECC Events", "Counters": "0-3", "Defn": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit erros in lockstep mode.", "Desc": "ECC Correctable Errors", "EvSel": 9, "ExtSel": "", }, "iMC.MAJOR_MODES": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. 
Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", }, "iMC.MAJOR_MODES.WRITE": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.MAJOR_MODES.PARTIAL": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.MAJOR_MODES.ISOCH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.MAJOR_MODES.READ": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. 
Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.POWER_CHANNEL_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.", "Desc": "Channel DLLOFF Cycles", "EvSel": 132, "ExtSel": "", "Notes": "IBT = Input Buffer Termination = Off", }, "iMC.POWER_CHANNEL_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.", "Desc": "Channel PPD Cycles", "EvSel": 133, "ExtSel": "", "MaxIncCyc": 4, "Notes": "IBT = Input Buffer Termination = On. ALL Ranks must be populated in order to measure", }, "iMC.POWER_CKE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, }, "iMC.POWER_CKE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. 
The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000001", }, "iMC.POWER_CKE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000100", }, "iMC.POWER_CKE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. 
There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b01000000", }, "iMC.POWER_CKE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00100000", }, "iMC.POWER_CKE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. 
Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b10000000", }, "iMC.POWER_CKE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000010", }, "iMC.POWER_CKE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00001000", }, "iMC.POWER_CKE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00010000", }, "iMC.POWER_CRITICAL_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.", "Desc": "Critical Throttle Cycles", "EvSel": 134, "ExtSel": "", }, "iMC.POWER_PCU_THROTTLING": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "EvSel": 66, "ExtSel": "", }, "iMC.POWER_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. 
One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.", "Desc": "Clock-Enabled Self-Refresh", "EvSel": 67, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.POWER_THROTTLE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. 
It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.POWER_THROTTLE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. 
If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.POWER_THROTTLE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.PREEMPTION": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. 
This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", }, "iMC.PREEMPTION.RD_PREEMPT_WR": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PREEMPTION.RD_PREEMPT_RD": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. 
This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PRE_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", }, "iMC.PRE_COUNT.PAGE_MISS": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PRE_COUNT.BYP": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.PRE_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.PRE_COUNT.PAGE_CLOSE": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PRE_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_PRIO": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", }, "iMC.RD_CAS_PRIO.LOW": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_PRIO.PANIC": { "Box": 
"iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_PRIO.HIGH": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_PRIO.MED": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_RANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", }, "iMC.RD_CAS_RANK0.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK0.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK0.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK0.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK0.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK0.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK0.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK0.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000111", }, 
"iMC.RD_CAS_RANK0.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK0.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK0.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK0.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK0.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK0.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK0.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK0.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK0.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK0.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK0.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000011", }, 
"iMC.RD_CAS_RANK0.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK0.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", }, "iMC.RD_CAS_RANK1.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK1.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK1.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK1.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK1.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK1.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK1.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK1.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK1.BANK7": { "Box": "iMC", "Category": 
"iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK1.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK1.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK1.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK1.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK1.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK1.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK1.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK1.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK1.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK1.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK1.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", 
"Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK1.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", }, "iMC.RD_CAS_RANK2.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", }, "iMC.RD_CAS_RANK4.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK4.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK4.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK4.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK4.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK4.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK4.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", 
"Umask": "b00000100", }, "iMC.RD_CAS_RANK4.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK4.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK4.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK4.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK4.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK4.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK4.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK4.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK4.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK4.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK4.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000000", 
}, "iMC.RD_CAS_RANK4.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK4.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK4.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", }, "iMC.RD_CAS_RANK5.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK5.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK5.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK5.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK5.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK5.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK5.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK5.BANKG2": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK5.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK5.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK5.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK5.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK5.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK5.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK5.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK5.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK5.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK5.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK5.BANK14": { "Box": "iMC", "Category": "iMC CAS 
Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK5.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK5.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", }, "iMC.RD_CAS_RANK6.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK6.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK6.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK6.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK6.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK6.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK6.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK6.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to 
Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK6.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK6.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK6.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK6.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK6.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK6.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK6.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK6.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK6.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK6.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK6.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, 
"ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK6.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK6.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", }, "iMC.RD_CAS_RANK7.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK7.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK7.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK7.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK7.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK7.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK7.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK7.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000001", }, 
"iMC.RD_CAS_RANK7.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK7.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK7.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK7.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK7.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK7.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK7.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK7.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK7.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK7.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK7.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010000", }, 
"iMC.RD_CAS_RANK7.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK7.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001011", }, "iMC.RPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.", "Desc": "Read Pending Queue Not Empty", "EvSel": 17, "ExtSel": "", }, "iMC.RPQ_INSERTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. 
This includes both ISOCH and non-ISOCH requests.", "Desc": "Read Pending Queue Allocations", "EvSel": 16, "ExtSel": "", }, "iMC.VMSE_MXB_WR_OCCUPANCY": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE MXB write buffer occupancy", "EvSel": 145, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, }, "iMC.VMSE_WR_PUSH": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", }, "iMC.VMSE_WR_PUSH.WMM": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.VMSE_WR_PUSH.RMM": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WMM_TO_RMM": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", }, "iMC.WMM_TO_RMM.LOW_THRESH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WMM_TO_RMM.VMSE_RETRY": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WMM_TO_RMM.STARVE": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WPQ_CYCLES_FULL": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. 
This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.", "Desc": "Write Pending Queue Full Cycles", "EvSel": 34, "ExtSel": "", }, "iMC.WPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.", "Desc": "Write Pending Queue Not Empty", "EvSel": 33, "ExtSel": "", }, "iMC.WPQ_READ_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. 
Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 35, "ExtSel": "", }, "iMC.WPQ_WRITE_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 36, "ExtSel": "", }, "iMC.WRONG_MM": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Not getting the requested Major Mode", "EvSel": 193, "ExtSel": "", }, "iMC.WR_CAS_RANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", }, "iMC.WR_CAS_RANK0.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK0.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK0.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK0.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK0.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": 
"0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK0.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK0.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK0.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK0.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK0.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK0.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK0.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK0.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK0.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK0.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK0.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": 
"WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK0.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK0.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK0.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK0.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK0.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", }, "iMC.WR_CAS_RANK1.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK1.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK1.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK1.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK1.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", 
"Umask": "b00001001", }, "iMC.WR_CAS_RANK1.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK1.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK1.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK1.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK1.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK1.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK1.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK1.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK1.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK1.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK1.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010000", 
}, "iMC.WR_CAS_RANK1.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK1.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK1.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK1.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK1.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", }, "iMC.WR_CAS_RANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", }, "iMC.WR_CAS_RANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", }, "iMC.WR_CAS_RANK4.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK4.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK4.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK4.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS 
Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK4.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK4.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK4.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK4.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK4.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK4.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK4.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK4.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK4.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK4.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK4.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", 
"EvSel": 188, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK4.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK4.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK4.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK4.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK4.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK4.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", }, "iMC.WR_CAS_RANK5.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK5.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK5.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK5.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001011", 
}, "iMC.WR_CAS_RANK5.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK5.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK5.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK5.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK5.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK5.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK5.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK5.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK5.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK5.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK5.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000001", }, 
"iMC.WR_CAS_RANK5.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK5.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK5.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK5.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK5.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK5.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", }, "iMC.WR_CAS_RANK6.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK6.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK6.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK6.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK6.BANK10": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK6.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK6.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK6.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK6.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK6.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK6.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK6.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK6.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK6.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK6.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK6.BANK15": { "Box": "iMC", "Category": "iMC CAS 
Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK6.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK6.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK6.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK6.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK6.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", }, "iMC.WR_CAS_RANK7.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK7.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK7.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK7.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK7.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access 
to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK7.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK7.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK7.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK7.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK7.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK7.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK7.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK7.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK7.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK7.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK7.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 
191, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK7.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK7.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK7.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK7.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK7.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001010", }, # IRP: "IRP.CACHE_TOTAL_OCCUPANCY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "IRP.CACHE_TOTAL_OCCUPANCY.ANY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000001", }, "IRP.CACHE_TOTAL_OCCUPANCY.SOURCE": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. 
This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000010", }, "IRP.CLOCKTICKS": { "Box": "IRP", "Category": "IRP IO_CLKS Events", "Counters": "0-1", "Defn": "Number of clocks in the IRP.", "Desc": "Clocks in the IRP", "EvSel": 0, "ExtSel": "", }, "IRP.COHERENT_OPS": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", }, "IRP.COHERENT_OPS.PCIRDCUR": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IRP.COHERENT_OPS.CRD": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IRP.COHERENT_OPS.DRD": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IRP.COHERENT_OPS.PCITOM": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IRP.COHERENT_OPS.PCIDCAHINT": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IRP.COHERENT_OPS.CLFLUSH": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts 
the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "b1xxxxxxx", }, "IRP.COHERENT_OPS.WBMTOI": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bx1xxxxxx", }, "IRP.COHERENT_OPS.RFO": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IRP.MISC0": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", }, "IRP.MISC0.2ND_ATOMIC_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx001xx00", }, "IRP.MISC0.2ND_RD_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx00xx100", }, "IRP.MISC0.FAST_REJ": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "b0000001x", }, "IRP.MISC0.FAST_XFER": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bxx100000", }, "IRP.MISC0.PF_TIMEOUT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "b1xx00000", }, "IRP.MISC0.FAST_REQ": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "b000000x1", }, "IRP.MISC0.2ND_WR_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": 
"bx00x1x00", }, "IRP.MISC0.PF_ACK_HINT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx1x00000", }, "IRP.MISC1": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", }, "IRP.MISC1.SLOW_S": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000xxx1x", }, "IRP.MISC1.SLOW_M": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000x1xxx", }, "IRP.MISC1.SLOW_I": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000xxxx1", }, "IRP.MISC1.SEC_RCVD_INVLD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "bxx1x0000", }, "IRP.MISC1.SLOW_E": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000xx1xx", }, "IRP.MISC1.LOST_FWD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b0001xxxx", }, "IRP.MISC1.SEC_RCVD_VLD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "bx1xx0000", }, "IRP.MISC1.DATA_THROTTLE": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b1xxx0000", }, "IRP.RxR_AK_INSERTS": { "Box": "IRP", "Category": "IRP AK_INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the AK Ingress. 
This queue is where the IRP receives responses from R2PCIe (the ring).", "Desc": "AK Ingress Occupancy", "EvSel": 10, "ExtSel": "", }, "IRP.RxR_BL_DRS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 4, "ExtSel": "", }, "IRP.RxR_BL_DRS_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - DRS", "EvSel": 1, "ExtSel": "", }, "IRP.RxR_BL_DRS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 7, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.RxR_BL_NCB_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 5, "ExtSel": "", }, "IRP.RxR_BL_NCB_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). 
It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - NCB", "EvSel": 2, "ExtSel": "", }, "IRP.RxR_BL_NCB_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 8, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.RxR_BL_NCS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 6, "ExtSel": "", }, "IRP.RxR_BL_NCS_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - NCS", "EvSel": 3, "ExtSel": "", }, "IRP.RxR_BL_NCS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.SNOOP_RESP": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. 
Unsure which combinations are possible.", }, "IRP.SNOOP_RESP.SNPINV": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bx1xxxxxx", }, "IRP.SNOOP_RESP.HIT_I": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxxxx1x", }, "IRP.SNOOP_RESP.SNPDATA": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxx1xxxxx", }, "IRP.SNOOP_RESP.HIT_ES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. 
Unsure which combinations are possible.", "Umask": "bxxxxx1xx", }, "IRP.SNOOP_RESP.SNPCODE": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxx1xxxx", }, "IRP.SNOOP_RESP.MISS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxxxxx1", }, "IRP.SNOOP_RESP.HIT_M": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxx1xxx", }, "IRP.TRANSACTIONS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. 
Meaningless by itself.", }, "IRP.TRANSACTIONS.WRITES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxxx1x", }, "IRP.TRANSACTIONS.WR_PREF": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxx1xxx", }, "IRP.TRANSACTIONS.RD_PREF": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. 
Meaningless by itself.", "Umask": "bxxxxx1xx", }, "IRP.TRANSACTIONS.ATOMIC": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxx1xxxx", }, "IRP.TRANSACTIONS.OTHER": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxx1xxxxx", }, "IRP.TRANSACTIONS.ORDERINGQ": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. 
Meaningless by itself.", "Umask": "bx1xxxxxx", }, "IRP.TRANSACTIONS.READS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxxxx1", }, "IRP.TxR_AD_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.", "Desc": "No AD Egress Credit Stalls", "EvSel": 24, "ExtSel": "", }, "IRP.TxR_BL_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.", "Desc": "No BL Egress Credit Stalls", "EvSel": 25, "ExtSel": "", }, "IRP.TxR_DATA_INSERTS_NCB": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 14, "ExtSel": "", }, "IRP.TxR_DATA_INSERTS_NCS": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 15, "ExtSel": "", }, "IRP.TxR_REQUEST_OCCUPANCY": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Accumultes the number of outstanding outbound requests from the IRP to the 
switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.", "Desc": "Outbound Request Queue Occupancy", "EvSel": 13, "ExtSel": "", "SubCtr": 1, }, # PCU: "PCU.CLOCKTICKS": { "Box": "PCU", "Category": "PCU PCLK Events", "Counters": "0-3", "Defn": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.", "Desc": "pclk Cycles", "EvSel": 0, "ExtSel": "", }, "PCU.CORE0_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 96, "ExtSel": "", "Notes": "This only tracks the hardware portion in the RCFSM (CFCFSM). This portion is just doing the core C state transition. It does not include any necessary frequency/voltage transitions.", }, "PCU.CORE10_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 106, "ExtSel": "", }, "PCU.CORE11_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 107, "ExtSel": "", }, "PCU.CORE12_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 108, "ExtSel": "", }, "PCU.CORE13_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 109, "ExtSel": "", }, "PCU.CORE14_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 110, "ExtSel": "", }, "PCU.CORE15_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 111, "ExtSel": "", }, "PCU.CORE16_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 112, "ExtSel": "", }, "PCU.CORE17_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 113, "ExtSel": "", }, "PCU.CORE1_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 97, "ExtSel": "", }, "PCU.CORE2_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 98, "ExtSel": "", }, "PCU.CORE3_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 99, "ExtSel": "", }, "PCU.CORE4_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 100, "ExtSel": "", }, "PCU.CORE5_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 101, "ExtSel": "", }, "PCU.CORE6_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 102, "ExtSel": "", }, "PCU.CORE7_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 103, "ExtSel": "", }, "PCU.CORE8_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 104, "ExtSel": "", "Notes": "This only tracks the hardware portion in the RCFSM (CFCFSM). This portion is just doing the core C state transition. It does not include any necessary frequency/voltage transitions.", }, "PCU.CORE9_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 105, "ExtSel": "", }, "PCU.DEMOTIONS_CORE0": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 48, "ExtSel": "", }, "PCU.DEMOTIONS_CORE1": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 49, "ExtSel": "", }, "PCU.DEMOTIONS_CORE10": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 58, "ExtSel": "", }, "PCU.DEMOTIONS_CORE11": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 59, "ExtSel": "", }, "PCU.DEMOTIONS_CORE12": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION 
Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 60, "ExtSel": "", }, "PCU.DEMOTIONS_CORE13": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 61, "ExtSel": "", }, "PCU.DEMOTIONS_CORE14": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 62, "ExtSel": "", }, "PCU.DEMOTIONS_CORE15": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 63, "ExtSel": "", }, "PCU.DEMOTIONS_CORE16": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 64, "ExtSel": "", }, "PCU.DEMOTIONS_CORE17": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 65, "ExtSel": "", }, "PCU.DEMOTIONS_CORE2": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 50, "ExtSel": "", }, "PCU.DEMOTIONS_CORE3": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 51, "ExtSel": "", }, 
"PCU.DEMOTIONS_CORE4": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 52, "ExtSel": "", }, "PCU.DEMOTIONS_CORE5": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 53, "ExtSel": "", }, "PCU.DEMOTIONS_CORE6": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 54, "ExtSel": "", }, "PCU.DEMOTIONS_CORE7": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 55, "ExtSel": "", }, "PCU.DEMOTIONS_CORE8": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 56, "ExtSel": "", }, "PCU.DEMOTIONS_CORE9": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 57, "ExtSel": "", }, "PCU.FREQ_MAX_LIMIT_THERMAL_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. 
This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.", "Desc": "Thermal Strongest Upper Limit Cycles", "EvSel": 4, "ExtSel": "", }, "PCU.FREQ_MAX_OS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the OS is the upper limit on frequency.", "Desc": "OS Strongest Upper Limit Cycles", "EvSel": 6, "ExtSel": "", "Notes": "Essentially, this event says the OS is getting the frequency it requested.", }, "PCU.FREQ_MAX_POWER_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when power is the upper limit on frequency.", "Desc": "Power Strongest Upper Limit Cycles", "EvSel": 5, "ExtSel": "", }, "PCU.FREQ_MIN_IO_P_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MIN_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.", "Desc": "IO P Limit Strongest Lower Limit Cycles", "EvSel": 115, "ExtSel": "", }, "PCU.FREQ_TRANS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. 
One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.", "Desc": "Cycles spent changing Frequency", "EvSel": 116, "ExtSel": "", }, "PCU.MEMORY_PHASE_SHEDDING_CYCLES": { "Box": "PCU", "Category": "PCU MEMORY_PHASE_SHEDDING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.", "Desc": "Memory Phase Shedding Cycles", "EvSel": 47, "ExtSel": "", "Notes": "Package C1", }, "PCU.PKG_RESIDENCY_C0_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C0", "EvSel": 42, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C1E_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C1E. This event can be used in conjunction with edge detect to count C1E entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C1E", "EvSel": 78, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C2E_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). 
Residency events do not include transition times.", "Desc": "Package C State Residency - C2E", "EvSel": 43, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C3_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C3", "EvSel": 44, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C6_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C6", "EvSel": 45, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C7_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C7. This event can be used in conjunction with edge detect to count C7 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C7 State Residency", "EvSel": 46, "ExtSel": "", }, "PCU.POWER_STATE_OCCUPANCY": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, }, "PCU.POWER_STATE_OCCUPANCY.CORES_C3": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b10000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C6": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b11000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C0": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b01000000", }, "PCU.PROCHOT_EXTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.", "Desc": "External Prochot", "EvSel": 10, "ExtSel": "", }, "PCU.PROCHOT_INTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in Interal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.", "Desc": "Internal Prochot", "EvSel": 9, "ExtSel": "", }, "PCU.TOTAL_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions across all cores.", "Desc": "Total Core C State Transition Cycles", "EvSel": 114, "ExtSel": "", }, "PCU.UFS_TRANSITIONS_RING_GV": { "Box": "PCU", "Category": "PCU UFS Events", "Counters": "0-3", "Defn": "Ring GV with same final and initial frequency", "EvSel": 121, "ExtSel": "", }, "PCU.VR_HOT_CYCLES": { "Box": "PCU", "Category": "PCU VR_HOT Events", "Counters": "0-3", "Desc": "VR Hot", "EvSel": 66, "ExtSel": "", }, } derived = { # R2PCIe: "R2PCIe.CYC_USED_DN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used Down and Even", "Equation": "RING_BL_USED.CCW / SAMPLE_INTERVAL", }, "R2PCIe.CYC_USED_UP": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": 
"Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.CW / SAMPLE_INTERVAL", }, "R2PCIe.RING_THRU_DN_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", "Desc": "Ring Throughput Down and Even", "Equation": "RING_BL_USED.CCW* 32", }, "R2PCIe.RING_THRU_UP_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": "Ring Throughput Up and Even", "Equation": "RING_BL_USED.CW * 32", }, # CBO: "CBO.AVG_INGRESS_DEPTH": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Depth of the Ingress Queue through the sample interval", "Desc": "Average Ingress Depth", "Equation": "RxR_OCCUPANCY.IRQ / SAMPLE_INTERVAL", }, "CBO.AVG_INGRESS_LATENCY": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks", "Desc": "Average Ingress Latency", "Equation": "RxR_OCCUPANCY.IRQ / RxR_INSERTS.IRQ", }, "CBO.AVG_INGRESS_LATENCY_WHEN_NE": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks when Ingress Queue has at least one entry", "Desc": "Average Latency in Non-Empty Ingress", "Equation": "RxR_OCCUPANCY.IRQ / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}", }, "CBO.AVG_TOR_DRDS_MISS_WHEN_NE": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Number of Data Read Entries that Miss the LLC when the TOR is not empty.", "Desc": "Average Data Read Misses in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / COUNTER0_OCCUPANCY{edge_det,thresh=0x1})) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRDS_WHEN_NE": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Number of Data Read Entries when the TOR is not empty.", "Desc": "Average Data Reads in 
Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.OPCODE / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_HIT_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that hit the LLC", "Desc": "Data Read Hit Latency through TOR", "Equation": "((TOR_OCCUPANCY.OPCODE - TOR_OCCUPANCY.MISS_OPCODE) / (TOR_INSERTS.OPCODE - TOR_INSERTS.MISS_OPCODE)) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Read Entries making their way through the TOR", "Desc": "Data Read Latency through TOR", "Equation": "(TOR_OCCUPANCY.OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_LOC_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC and were satsified by Local Memory", "Desc": "Data Read Local Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.{opc,nid}={0x182,my_node}", "Filter": "CBoFilter1[28:20], CBoFilter1[15:0]", }, "CBO.AVG_TOR_DRD_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC", "Desc": "Data Read Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_REM_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC and were satsified by a Remote cache or Remote Memory", "Desc": "Data Read Remote Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) 
with:Cn_MSR_PMON_BOX_FILTER.{opc,nid}={0x182,other_nodes}", "Filter": "CBoFilter1[28:20], CBoFilter1[15:0]", }, "CBO.CYC_INGRESS_BLOCKED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Cycles the Ingress Request Queue arbiter was Blocked", "Desc": "Cycles Ingress Blocked", "Equation": "RxR_EXT_STARVED.IRQ / SAMPLE_INTERVAL", }, "CBO.CYC_USED_DN": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used Down and Even", "Equation": "RING_BL_USED.CCW / SAMPLE_INTERVAL", }, "CBO.CYC_USED_UP": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.CW / SAMPLE_INTERVAL", }, "CBO.FAST_STR_LLC_MISS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of ItoM (fast string) operations that miss the LLC", "Desc": "Fast String misses", "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8 + TOR_INSERTS.MISS_OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E, Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8}", "Filter": "CBoFilter1[28:20]", }, "CBO.FAST_STR_LLC_REQ": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of ItoM (fast string) operations that reference the LLC", "Desc": "Fast String operations", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8 + TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E, Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8}", "Filter": "CBoFilter1[28:20]", }, "CBO.INGRESS_REJ_V_INS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Ratio of Ingress Request Entries that were rejected vs. inserted", "Desc": "Ingress Rejects vs. 
Inserts", "Equation": "RxR_INSERTS.IRQ_REJ / RxR_INSERTS.IRQ", }, "CBO.IO_READ_BW": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "IO Read Bandwidth in MB - Disk or Network Reads", "Desc": "IO Read Bandwidth", "Equation": "(TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E, Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8} + TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x1E6) * 64 / 1000000", "Filter": "CBoFilter0[5:0], CBoFilter1[28:20]", }, "CBO.IO_WRITE_BW": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "IO Write Bandwidth in MB - Disk or Network Writes", "Desc": "IO Write Bandwidth", "Equation": "(TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x19E + TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x1E4) * 64 / 1000000", "Filter": "CBoFilter1[28:20]", }, "CBO.LLC_DRD_MISS_PCT": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "LLC Data Read miss ratio", "Desc": "LLC DRD Miss Ratio", "Equation": "LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER0.state=0x1 / LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER.state=0x3F", "Filter": "CBoFilter0[23:17]", }, "CBO.LLC_RFO_MISS_PCT": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC RFO Miss Ratio", "Desc": "LLC RFO Miss Ratio", "Equation": "(TOR_INSERTS.MISS_OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x180 - (TOR_INSERTS.MISS_OPCODE / TOR_INSERTS.OPCODE) with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E,Cn_MSR_PMON_BOX_FILTER1.opc=0x180}", "Filter": "CBoFilter1[28:20]", }, "CBO.MEM_WB_BYTES": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "Data written back to memory in Number of Bytes", "Desc": "Memory Writebacks", "Equation": "LLC_VICTIMS.M_STATE * 64", }, "CBO.MMIO_PARTIAL_READS_CPU": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of Partial MMIO Reads initiated by a Core", "Desc": "MMIO Partial Reads - CPU", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.nc=1, Cn_MSR_PMON_BOX_FILTER1.opc=0x187}", "Filter": "CBoFilter1[28:20], 
CBoFilter1[30]", }, "CBO.MMIO_WRITES_CPU": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of MMIO Writes initiated by a Core", "Desc": "MMIO Writes - CPU", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.nc=1, Cn_MSR_PMON_BOX_FILTER1.opc=0x18F}", "Filter": "CBoFilter1[28:20], CBoFilter1[30]", }, "CBO.PARTIAL_PCI_WRITES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of partial PCI writes", "Desc": "Partial PCI Writes", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E,Cn_MSR_PMON_BOX_FILTER1.opc=0x180}", "Filter": "CBoFilter0[5:0], CBoFilter1[28:20]", }, "CBO.PCI_READS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of PCI reads (full and partial)", "Desc": "PCI Reads", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x19E", "Filter": "CBoFilter1[28:20]", }, "CBO.PCI_WRITES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of PCI writes", "Desc": "PCI Writes", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E,Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8}", "Filter": "CBoFilter0[5:0], CBoFilter1[28:20]", }, "CBO.RING_THRU_DN_BYTES": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", "Desc": "Ring Throughput Down and Even", "Equation": "RING_BL_USED.CCW* 32", }, "CBO.RING_THRU_UP_BYTES": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": "Ring Throughput Up and Even", "Equation": "RING_BL_USED.CW * 32", }, "CBO.STREAMED_FULL_STORES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of Streamed Store (of Full Cache Line) Transactions", "Desc": "Streaming Stores (Full Line)", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x18C", "Filter": "CBoFilter1[28:20]", }, "CBO.STREAMED_PART_STORES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of Streamed Store (of 
Partial Cache Line) Transactions", "Desc": "Streaming Stores (Partial Line)", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x18D", "Filter": "CBoFilter1[28:20]", }, "CBO.UC_READS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Uncachable Read Transactions", "Desc": "Uncacheable Reads", "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x187", "Filter": "CBoFilter1[28:20]", }, # HA: "HA.HITME_INSERTS": { "Box": "HA", "Category": "HA HitME Events", "Equation": "HITME_LOOKUP.ALLOCS - HITME_HIT.ALLOCS", }, "HA.HITME_INVAL": { "Box": "HA", "Category": "HA HitME Events", "Equation": "HITME_HIT.INVALS", }, "HA.PCT_CYCLES_BL_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Defn": "Percentage of time the BL Egress Queue is full", "Desc": "Percent BL Egress Full", "Equation": "TxR_BL_CYCLES_FULL.ALL / SAMPLE_INTERVAL", }, "HA.PCT_CYCLES_D2C_DISABLED": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Defn": "Percentage of time that Direct2Core was disabled.", "Desc": "Percent D2C Disabled", "Equation": "DIRECT2CORE_CYCLES_DISABLED / SAMPLE_INTERVAL", }, "HA.PCT_RD_REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Defn": "Percentage of HA traffic that is from Read Requests", "Desc": "Percent Read Requests", "Equation": "REQUESTS.READS / (REQUESTS.READS + REQUESTS.WRITES)", }, "HA.PCT_WR_REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Defn": "Percentage of HA traffic that is from Write Requests", "Desc": "Percent Write Requests", "Equation": "REQUESTS.WRITES / (REQUESTS.READS + REQUESTS.WRITES)", }, # iMC: "iMC.MEM_BW_READS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by reads. Expressed in bytes.", "Desc": "Read Memory Bandwidth", "Equation": "(CAS_COUNT.RD * 64)", }, "iMC.MEM_BW_TOTAL": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Total memory bandwidth. 
Expressed in bytes.", "Desc": "Total Memory Bandwidth", "Equation": "MEM_BW_READS + MEM_BW_WRITES", }, "iMC.MEM_BW_WRITES": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by writes Expressed in bytes.", "Desc": "Write Memory Bandwidth", "Equation": "(CAS_COUNT.WR * 64)", }, "iMC.PCT_CYCLES_CRITICAL_THROTTLE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in critical thermal throttling", "Desc": "Percent Cycles Critical Throttle", "Equation": "POWER_CRITICAL_THROTTLE_CYCLES / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in CKE slow (DLOFF) mode", "Desc": "Percent Cycles DLOFF", "Equation": "POWER_CHANNEL_DLLOFF / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_CKE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in CKE ON mode.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_CKE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_THR": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in thermal throttling.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_THROTTLE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in PPD mode", "Desc": "Percent Cycles PPD", "Equation": "POWER_CHANNEL_PPD / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles Memory is in self refresh power mode", "Desc": "Percent Cycles Self Refresh", "Equation": "POWER_SELF_REFRESH / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_REQUESTS_PAGE_EMPTY": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that 
resulted in Page Empty", "Desc": "Percent Requests Page Empty", "Equation": "(ACT_COUNT - PRE_COUNT.PAGE_MISS)/ (CAS_COUNT.RD + CAS_COUNT.WR)", }, "iMC.PCT_REQUESTS_PAGE_HIT": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Hits", "Desc": "Percent Requests Page Hit", "Equation": "1 - (PCT_REQUESTS_PAGE_EMPTY + PCT_REQUESTS_PAGE_MISS)", }, "iMC.PCT_REQUESTS_PAGE_MISS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Misses", "Desc": "Percent Requests Page Miss", "Equation": "PRE_COUNT.PAGE_MISS / (CAS_COUNT.RD + CAS_COUNT.WR)", }, # PCU: "PCU.PCT_CYC_FREQ_OS_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by the OS", "Desc": "Percent Frequency OS Limited", "Equation": "FREQ_MAX_OS_CYCLES / CLOCKTICKS", }, "PCU.PCT_CYC_FREQ_POWER_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by power", "Desc": "Percent Frequency Power Limited", "Equation": "FREQ_MAX_POWER_CYCLES / CLOCKTICKS", }, "PCU.PCT_CYC_FREQ_THERMAL_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by thermal issues", "Desc": "Percent Frequency Thermal Limited", "Equation": "FREQ_MAX_LIMIT_THERMAL_CYCLES / CLOCKTICKS", }, } categories = ( "CBO CACHE Events", "CBO EGRESS Events", "CBO INGRESS Events", "CBO INGRESS_RETRY Events", "CBO MISC Events", "CBO OCCUPANCY Events", "CBO RING Events", "CBO SBO Credit Events", "CBO TOR Events", "CBO UCLK Events", "HA ADDR_OPCODE_MATCH Events", "HA BL_EGRESS Events", "HA BT (Backup Tracker) Events", "HA BYPASS Events", "HA CONFLICTS Events", "HA DIRECT2CORE Events", "HA DIRECTORY Events", "HA EGRESS Events", "HA HitME Events", "HA IMC_MISC Events", "HA IMC_READS Events", "HA IMC_WRITES Events", "HA OSB (Opportunistic Snoop Broadcast) Events", 
"HA OUTBOUND_TX Events", "HA QPI_IGR_CREDITS Events", "HA REQUESTS Events", "HA RING Events", "HA RPQ_CREDITS Events", "HA SBO Credit Events", "HA SNOOPS Events", "HA SNP_RESP Events", "HA TAD Events", "HA TRACKER Events", "HA UCLK Events", "HA WPQ_CREDITS Events", "IRP AK_INGRESS Events", "IRP BL_INGRESS_DRS Events", "IRP BL_INGRESS_NCB Events", "IRP BL_INGRESS_NCS Events", "IRP Coherency Events", "IRP IO_CLKS Events", "IRP MISC Events", "IRP OUTBOUND_REQUESTS Events", "IRP STALL_CYCLES Events", "IRP TRANSACTIONS Events", "IRP WRITE_CACHE Events", "PCU CORE_C_STATE_TRANSITION Events", "PCU FREQ_MAX_LIMIT Events", "PCU FREQ_MIN_LIMIT Events", "PCU FREQ_TRANS Events", "PCU MEMORY_PHASE_SHEDDING Events", "PCU PCLK Events", "PCU PKG_C_STATE_RESIDENCY Events", "PCU POWER_STATE_OCC Events", "PCU PROCHOT Events", "PCU UFS Events", "PCU VR_HOT Events", "R2PCIe EGRESS Events", "R2PCIe IIO Credit Events", "R2PCIe INGRESS Events", "R2PCIe RING Events", "R2PCIe SBO Credit Events", "R2PCIe UCLK Events", "UBOX EVENT_MSG Events", "UBOX PHOLD Events", "UBOX RACU Events", "iMC ACT Events", "iMC BYPASS Command Events", "iMC CAS Events", "iMC DCLK Events", "iMC DRAM_PRE_ALL Events", "iMC DRAM_REFRESH Events", "iMC ECC Events", "iMC MAJOR_MODES Events", "iMC POWER Events", "iMC PRE Events", "iMC PREEMPTION Events", "iMC RPQ Events", "iMC VMSE Events", "iMC WPQ Events", );
439,736
Python
.py
9,595
35.927671
842
0.560947
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,952
ivt_extra.py
andikleen_pmu-tools/ucevent/ivt_extra.py
# Extra derived uncore metrics for IvyTown, keyed as "BOX.METRIC".
# Field meanings:
#   Equation -- formula over raw uncore events; a "with:REG.field=val"
#               suffix programs the named PMON filter register for that event.
#   Defn     -- long definition shown in verbose output.
#   Desc     -- short description shown in normal output.
#   Obscure  -- when set, hide the metric from default listings.

# Shared documentation text for the four PCU frequency-band metrics
# (was duplicated verbatim in all four entries).
_FREQ_BAND_DEFN = (
    "Counts the percent that the uncore was running at a frequency greater "
    "than or equal to the frequency that is configured in the filter. One "
    "can use all four counters with this event, so it is possible to track "
    "up to 4 configurable bands. One can use edge detect in conjunction "
    "with this event to track the number of times that we transitioned into "
    "a frequency greater than or equal to the configurable frequency. One "
    "can also use inversion to track cycles when we were less than the "
    "configured frequency.")
_FREQ_BAND_NOTES = (
    "The PMON control registers in the PCU only update on a frequency "
    "transition. Changing the measuring threshold during a sample interval "
    "may introduce errors in the counts. This is especially true when "
    "running at a constant frequency for an extended period of time. There "
    "is a corner case here: we set this code on the GV transition. So, if "
    "we never GV we will never call this code. This event does not include "
    "transition times. It is handled on fast path.")

extra_derived = {
    # CBO
    "CBO.LLC_PCIE_MEM_WRITE_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        "Defn": "LLC Miss Data from PCIe to memory written",
        "Desc": "LLC Miss Data from PCIe to memory written",
        "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19e * 64",
    },
    "CBO.LLC_PCIE_MEM_READ_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        "Defn": "LLC Miss Data from PCIe read from memory",
        "Desc": "LLC Miss Data from PCIe read from memory",
        "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19c * 64",
    },
    "CBO.LLC_PCIE_MEM_TOTAL_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        # Fixed copy-paste description: this is the read+write total,
        # not reads only (see Equation).
        "Defn": "LLC Miss Data from PCIe read from and written to memory",
        "Desc": "LLC Miss Data from PCIe read from and written to memory",
        "Equation": "LLC_PCIE_MEM_READ_BYTES + LLC_PCIE_MEM_WRITE_BYTES",
    },
    "CBO.LLC_DDIO_MEM_WRITE_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        "Defn": "LLC Miss Data from DDIO to memory written",
        "Desc": "LLC Miss Data from DDIO to memory written",
        "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19e * 64",
    },
    "CBO.LLC_DDIO_MEM_READ_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        # Fixed copy-paste description: said "PCIe" but this is the
        # DDIO variant (MISS_OPCODE-based Equation).
        "Defn": "LLC Miss Data from DDIO read from memory",
        "Desc": "LLC Miss Data from DDIO read from memory",
        "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19c * 64",
    },
    "CBO.LLC_DDIO_MEM_TOTAL_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        # Fixed copy-paste description: this is the read+write total.
        "Defn": "LLC Miss Data from DDIO read from and written to memory",
        "Desc": "LLC Miss Data from DDIO read from and written to memory",
        "Equation": "LLC_DDIO_MEM_READ_BYTES + LLC_DDIO_MEM_WRITE_BYTES",
    },
    "CBO.AVG_LLC_DATA_READ_MISS_LATENCY": {
        "Box": "CBO",
        "Category": "CBO CACHE Events",
        "Defn": "Average LLC data read (demand+prefetch) miss latency (core clocks)",
        "Desc": "Average LLC data read (demand+prefetch) miss latency (core clocks)",
        "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182",
    },
    # QPI_LL
    "QPI_LL.QPI_SPEED": {
        "Box": "QPI_LL",
        "Category": "QPI_LL CFCLK Events",
        "Counters": "0-3",
        "Defn": "QPI speed - GT/s",
        "Desc": "QPI speed - GT/s",
        "Equation": "CLOCKTICKS/NUM_R3QPI*8/1000000000",
    },
    # iMC
    "iMC.DIMM_SPEED": {
        "Box": "iMC",
        "Category": "iMC CAS Events",
        "Defn": "DIMM Speed",
        "Desc": "DIMM Speed",
        "Equation": "MC_Chy_PCI_PMON_CTR_FIXED / 2",
        "Obscure": 1,
    },
}

# PCU: four identical frequency-band residency metrics, one per band.
for _band in range(4):
    extra_derived["PCU.PCT_FREQ_BAND%d" % _band] = {
        "Box": "PCU",
        "Category": "PCU FREQ_RESIDENCY Events",
        "Defn": _FREQ_BAND_DEFN,
        "Desc": "Frequency Residency",
        "Notes": _FREQ_BAND_NOTES,
        "Equation": "FREQ_BAND%d_CYCLES / CLOCKTICKS" % _band,
    }
del _band
7,755
Python
.py
101
67.455446
526
0.668279
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,953
jkt_extra.py
andikleen_pmu-tools/ucevent/jkt_extra.py
# Extra derived uncore metrics for Jaketown, keyed as "BOX.METRIC".
# Field meanings:
#   Equation -- formula over raw uncore events; a "with:REG.field=val"
#               suffix programs the named PMON filter register for that event.
#   Defn     -- long definition shown in verbose output.
#   Desc     -- short description shown in normal output.
#   Obscure  -- when set, hide the metric from default listings.

# Shared documentation text for the four PCU frequency-band metrics
# (was duplicated verbatim in all four entries).
_FREQ_BAND_DEFN = (
    "Counts the percent that the uncore was running at a frequency greater "
    "than or equal to the frequency that is configured in the filter. One "
    "can use all four counters with this event, so it is possible to track "
    "up to 4 configurable bands. One can use edge detect in conjunction "
    "with this event to track the number of times that we transitioned into "
    "a frequency greater than or equal to the configurable frequency. One "
    "can also use inversion to track cycles when we were less than the "
    "configured frequency.")
_FREQ_BAND_NOTES = (
    "The PMON control registers in the PCU only update on a frequency "
    "transition. Changing the measuring threshold during a sample interval "
    "may introduce errors in the counts. This is especially true when "
    "running at a constant frequency for an extended period of time. There "
    "is a corner case here: we set this code on the GV transition. So, if "
    "we never GV we will never call this code. This event does not include "
    "transition times. It is handled on fast path.")

extra_derived = {
    # CBO
    "CBO.LLC_PCIE_MEM_WRITE_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        "Defn": "LLC Miss Data from PCIe to memory written",
        "Desc": "LLC Miss Data from PCIe to memory written",
        "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19e * 64",
    },
    "CBO.LLC_PCIE_MEM_READ_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        "Defn": "LLC Miss Data from PCIe read from memory",
        "Desc": "LLC Miss Data from PCIe read from memory",
        "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19c * 64",
    },
    "CBO.LLC_PCIE_MEM_TOTAL_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        # Fixed copy-paste description: this is the read+write total,
        # not reads only (see Equation).
        "Defn": "LLC Miss Data from PCIe read from and written to memory",
        "Desc": "LLC Miss Data from PCIe read from and written to memory",
        "Equation": "LLC_PCIE_MEM_READ_BYTES + LLC_PCIE_MEM_WRITE_BYTES",
    },
    "CBO.LLC_DDIO_MEM_WRITE_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        "Defn": "LLC Miss Data from DDIO to memory written",
        "Desc": "LLC Miss Data from DDIO to memory written",
        "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19e * 64",
    },
    "CBO.LLC_DDIO_MEM_READ_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        # Fixed copy-paste description: said "PCIe" but this is the
        # DDIO variant (MISS_OPCODE-based Equation).
        "Defn": "LLC Miss Data from DDIO read from memory",
        "Desc": "LLC Miss Data from DDIO read from memory",
        "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19c * 64",
    },
    "CBO.LLC_DDIO_MEM_TOTAL_BYTES": {
        "Box": "CBO",
        "Category": "CBO TOR Events",
        # Fixed copy-paste description: this is the read+write total.
        "Defn": "LLC Miss Data from DDIO read from and written to memory",
        "Desc": "LLC Miss Data from DDIO read from and written to memory",
        "Equation": "LLC_DDIO_MEM_READ_BYTES + LLC_DDIO_MEM_WRITE_BYTES",
    },
    "CBO.AVG_LLC_DATA_READ_MISS_LATENCY": {
        "Box": "CBO",
        "Category": "CBO CACHE Events",
        "Defn": "Average LLC data read (demand+prefetch) miss latency (core clocks)",
        "Desc": "Average LLC data read (demand+prefetch) miss latency (core clocks)",
        "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182",
    },
    # QPI_LL
    "QPI_LL.QPI_SPEED": {
        "Box": "QPI_LL",
        "Category": "QPI_LL CFCLK Events",
        "Counters": "0-3",
        "Defn": "QPI speed - GT/s",
        "Desc": "QPI speed - GT/s",
        "Equation": "CLOCKTICKS/NUM_R3QPI*8/1000000000",
    },
    # iMC
    "iMC.DIMM_SPEED": {
        "Box": "iMC",
        "Category": "iMC CAS Events",
        "Defn": "DIMM Speed",
        "Desc": "DIMM Speed",
        "Equation": "MC_Chy_PCI_PMON_CTR_FIXED / 2",
        "Obscure": 1,
    },
}

# PCU: four identical frequency-band residency metrics, one per band.
for _band in range(4):
    extra_derived["PCU.PCT_FREQ_BAND%d" % _band] = {
        "Box": "PCU",
        "Category": "PCU FREQ_RESIDENCY Events",
        "Defn": _FREQ_BAND_DEFN,
        "Desc": "Frequency Residency",
        "Notes": _FREQ_BAND_NOTES,
        "Equation": "FREQ_BAND%d_CYCLES / CLOCKTICKS" % _band,
    }
del _band
7,755
Python
.py
101
67.455446
526
0.668279
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,954
icx_uc.py
andikleen_pmu-tools/ucevent/icx_uc.py
# ICX icx_uc_events.v1.00p.txt icx_uc_derived.v1.00p.txt # aliases aliases = { "M2MAddrMask0": "M2M{i}_PCI_PMON_ADDRMASK0", "UBoxFilter": "U_MSR_PMON_BOX_FILTER", "M2MAddrMatch0": "M2M{i}_PCI_PMON_ADDRMATCH1", "M2MAddrMask1": "M2M{i}_PCI_PMON_ADDRMASK1", "CHAFilter": "C{i}_MSR_PMON_BOX_FILTER", "M2MOpcMask": "M2M{i}_PCI_PMON_OPCMASK", "PCUFilter": "PCU_MSR_PMON_BOX_FILTER", } events = { # PCIE3: "PCIE3.CORR_ERR": { "Box": "PCIE3", "Category": "PCIE3 Misc Events", "Counters": "0-3", "Desc": "Number of Correctable Errors", "EvSel": 31, "ExtSel": "", }, "PCIE3.LINK_CYCLES": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", }, "PCIE3.LINK_CYCLES.L0_BUSY.PORT0": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxxx1", }, "PCIE3.LINK_CYCLES.L0_BUSY.PORT2": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxxx1", }, "PCIE3.LINK_CYCLES.L0_IDLE.PORT3": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxx1xxx", "Umask": "bxxxxx1xx", }, "PCIE3.LINK_CYCLES.L1.PORT1": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxx1x", }, "PCIE3.LINK_CYCLES.L0_IDLE.PORT1": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxxxx1x", "Umask": "bxxxxx1xx", }, 
"PCIE3.LINK_CYCLES.L0_IDLE.PORT0": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxxxxx1", "Umask": "bxxxxx1xx", }, "PCIE3.LINK_CYCLES.L1.PORT0": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxx1x", }, "PCIE3.LINK_CYCLES.L0_IDLE.PORT2": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxxx1xx", "Umask": "bxxxxx1xx", }, "PCIE3.LINK_CYCLES.L1.PORT3": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxx1x", }, "PCIE3.LINK_CYCLES.L1.PORT2": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxx1x", }, "PCIE3.LINK_CYCLES.L0_BUSY.PORT1": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxxx1", }, "PCIE3.LINK_CYCLES.L0_BUSY.PORT3": { "Box": "PCIE3", "Category": "PCIE3 Link Cycle Events", "Counters": "0-3", "Desc": "Cycles a Link is in a power state or busy/idle", "EvSel": 24, "ExtSel": "", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxxx1", }, "PCIE3.LINK_RETRIES": { "Box": "PCIE3", "Category": "PCIE3 Misc Events", "Counters": "0-3", "Desc": "Number of Link Retries", "EvSel": 30, "ExtSel": "", }, "PCIE3.UTIL_IN": { "Box": "PCIE3", "Category": "PCIE3 Utilization Events", "Counters": "0-3", "Defn": "Number of Cycles the Inbound Link 
is Utilized. Utilized is whenever the IP was not idle", "Desc": "Cycles of Inbound Link Utilization", "EvSel": 22, "ExtSel": "", }, "PCIE3.UTIL_IN.PORT0": { "Box": "PCIE3", "Category": "PCIE3 Utilization Events", "Counters": "0-3", "Defn": "Number of Cycles the Inbound Link is Utilized. Utilized is whenever the IP was not idle", "Desc": "Cycles of Inbound Link Utilization", "EvSel": 22, "ExtSel": "", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxxxx", }, "PCIE3.UTIL_IN.PORT1": { "Box": "PCIE3", "Category": "PCIE3 Utilization Events", "Counters": "0-3", "Defn": "Number of Cycles the Inbound Link is Utilized. Utilized is whenever the IP was not idle", "Desc": "Cycles of Inbound Link Utilization", "EvSel": 22, "ExtSel": "", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxxxx", }, "PCIE3.UTIL_IN.PORT2": { "Box": "PCIE3", "Category": "PCIE3 Utilization Events", "Counters": "0-3", "Defn": "Number of Cycles the Inbound Link is Utilized. Utilized is whenever the IP was not idle", "Desc": "Cycles of Inbound Link Utilization", "EvSel": 22, "ExtSel": "", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxxxx", }, "PCIE3.UTIL_IN.PORT3": { "Box": "PCIE3", "Category": "PCIE3 Utilization Events", "Counters": "0-3", "Defn": "Number of Cycles the Inbound Link is Utilized. Utilized is whenever the IP was not idle", "Desc": "Cycles of Inbound Link Utilization", "EvSel": 22, "ExtSel": "", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxxxx", }, "PCIE3.UTIL_OUT": { "Box": "PCIE3", "Category": "PCIE3 Utilization Events", "Counters": "0-3", "Defn": "Number of Cycles the Outbound Link is Utilized. Utilized is whenever the IP was not idle", "Desc": "Cycles of Outbound Link Utilization", "EvSel": 23, "ExtSel": "", }, "PCIE3.UTIL_OUT.PORT0": { "Box": "PCIE3", "Category": "PCIE3 Utilization Events", "Counters": "0-3", "Defn": "Number of Cycles the Outbound Link is Utilized. 
Utilized is whenever the IP was not idle", "Desc": "Cycles of Outbound Link Utilization", "EvSel": 23, "ExtSel": "", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxxxx", }, "PCIE3.UTIL_OUT.PORT1": { "Box": "PCIE3", "Category": "PCIE3 Utilization Events", "Counters": "0-3", "Defn": "Number of Cycles the Outbound Link is Utilized. Utilized is whenever the IP was not idle", "Desc": "Cycles of Outbound Link Utilization", "EvSel": 23, "ExtSel": "", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxxxx", }, "PCIE3.UTIL_OUT.PORT2": { "Box": "PCIE3", "Category": "PCIE3 Utilization Events", "Counters": "0-3", "Defn": "Number of Cycles the Outbound Link is Utilized. Utilized is whenever the IP was not idle", "Desc": "Cycles of Outbound Link Utilization", "EvSel": 23, "ExtSel": "", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxxxx", }, "PCIE3.UTIL_OUT.PORT3": { "Box": "PCIE3", "Category": "PCIE3 Utilization Events", "Counters": "0-3", "Defn": "Number of Cycles the Outbound Link is Utilized. Utilized is whenever the IP was not idle", "Desc": "Cycles of Outbound Link Utilization", "EvSel": 23, "ExtSel": "", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxxxx", }, # UPI_LL: "UPI_LL.CLOCKTICKS": { "Box": "UPI_LL", "Category": "UPI_LL CFCLK Events", "Counters": "0-3", "Defn": "Counts the number of clocks in the UPI LL. This clock runs at 1/8th the \"GT/s\" speed of the UPI link. For example, a 8GT/s link will have qfclk or 1GHz. Current products do not support dynamic link speeds, so this frequency is fixed.", "Desc": "Number of kfclks", "EvSel": 1, "ExtSel": "", }, "UPI_LL.DIRECT_ATTEMPTS": { "Box": "UPI_LL", "Category": "UPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct packet attempts", "EvSel": 18, "ExtSel": "", }, "UPI_LL.DIRECT_ATTEMPTS.D2C": { "Box": "UPI_LL", "Category": "UPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct packet attempts", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.DIRECT_ATTEMPTS.D2K": { "Box": "UPI_LL", "Category": "UPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct packet attempts", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.FLOWQ_NO_VNA_CRD": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AK_VNA_EQ0": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AK_VNA_EQ2": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bx1xxxxxx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AD_VNA_EQ1": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AD_VNA_EQ2": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AD_VNA_EQ0": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AK_VNA_EQ3": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "b1xxxxxxx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AK_VNA_EQ1": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxx1xxxxx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.BL_VNA_EQ0": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.L1_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER Events", "Counters": "0-3", "Defn": "Number of UPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a UPI link. Use edge detect to count the number of instances when the UPI link entered L1. 
Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.", "Desc": "Cycles in L1", "EvSel": 33, "ExtSel": "", }, "UPI_LL.M3_BYP_BLOCKED": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", }, "UPI_LL.M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.M3_BYP_BLOCKED.GV_BLOCK": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UPI_LL.M3_BYP_BLOCKED.BGF_CRD": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.M3_CRD_RETURN_BLOCKED": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 22, "ExtSel": "", }, "UPI_LL.M3_RXQ_BLOCKED": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", }, "UPI_LL.M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UPI_LL.M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", 
"Umask": "bxxxxxxx1", }, "UPI_LL.M3_RXQ_BLOCKED.BGF_CRD": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxx1xxxxx", }, "UPI_LL.M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.M3_RXQ_BLOCKED.GV_BLOCK": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bx1xxxxxx", }, "UPI_LL.M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.PHY_INIT_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER Events", "Counters": "0-3", "Desc": "Cycles where phy is not in L0, L0c, L0p, L1", "EvSel": 32, "ExtSel": "", }, "UPI_LL.POWER_L1_NACK": { "Box": "UPI_LL", "Category": "UPI_LL POWER Events", "Counters": "0-3", "Defn": "Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).", "Desc": "L1 Req Nack", "EvSel": 35, "ExtSel": "", "Notes": "L1 only", }, "UPI_LL.POWER_L1_REQ": { "Box": "UPI_LL", "Category": "UPI_LL POWER Events", "Counters": "0-3", "Defn": "Counts the number of times a link sends/receives a LinkReqAck. 
When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).", "Desc": "L1 Req (same as L1 Ack).", "EvSel": 34, "ExtSel": "", "Notes": "L1 only", }, "UPI_LL.REQ_SLOT2_FROM_M3": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 70, "ExtSel": "", }, "UPI_LL.REQ_SLOT2_FROM_M3.ACK": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 70, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.REQ_SLOT2_FROM_M3.VN0": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.REQ_SLOT2_FROM_M3.VN1": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.REQ_SLOT2_FROM_M3.VNA": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.RxL0P_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. 
Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p", "EvSel": 37, "ExtSel": "", "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "UPI_LL.RxL0_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0", "EvSel": 36, "ExtSel": "", }, "UPI_LL.RxL_BASIC_HDR_MATCH": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1010", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSP_DATA_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bXXXX1100", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.WB": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1101", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSP_DATA": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bxxxx1100", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.NCB_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1110", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSP_NODATA": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. 
Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1010", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.SNP_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1001", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.REQ": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. 
Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1000", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.NCS": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1111", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.WB_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. 
Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1101", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSPCNFLT": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "b10101010", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.NCB": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. 
Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1110", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSPI": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "b00101010", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.NCS_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. 
Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1111", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.REQ_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1000", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.SNP": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Receive path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. 
Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1001", "UmaskExt": 0x0, }, "UPI_LL.RxL_BYPASSED": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "RxQ Flit Buffer Bypassed", "EvSel": 49, "ExtSel": "", "MaxIncCyc": 3, }, "UPI_LL.RxL_BYPASSED.SLOT2": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "RxQ Flit Buffer Bypassed", "EvSel": 49, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxx1xx", }, "UPI_LL.RxL_BYPASSED.SLOT1": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. 
If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "RxQ Flit Buffer Bypassed", "EvSel": 49, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxx1x", }, "UPI_LL.RxL_BYPASSED.SLOT0": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "RxQ Flit Buffer Bypassed", "EvSel": 49, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxxx1", }, "UPI_LL.RxL_CRC_ERRORS": { "Box": "UPI_LL", "Category": "UPI_LL CRC_ERRORS_RX Events", "Counters": "0-3", "Defn": "Number of CRC errors detected in the UPI Agent. Each UPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the UPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).", "Desc": "CRC Errors Detected", "EvSel": 11, "ExtSel": "", }, "UPI_LL.RxL_CRC_LLR_REQ_TRANSMIT": { "Box": "UPI_LL", "Category": "UPI_LL CRC_ERRORS_RX Events", "Counters": "0-3", "Defn": "Number of LLR Requests were transmitted. This should generally be <= the number of CRC errors detected. If multiple errors are detected before the Rx side receives a LLC_REQ_ACK from the Tx side, there is no need to send more LLR_REQ_NACKs.", "Desc": "LLR Requests Sent", "EvSel": 8, "ExtSel": "", "Notes": "We detected an error in Rx, and so we transmit to enter LLR mode. 
If we get an error and we ahave not yet recv'd the LLR_ACK, we will not send another request (unless we timeout when we will send another).", }, "UPI_LL.RxL_CREDITS_CONSUMED_VN0": { "Box": "UPI_LL", "Category": "UPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 57, "ExtSel": "", }, "UPI_LL.RxL_CREDITS_CONSUMED_VN1": { "Box": "UPI_LL", "Category": "UPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 58, "ExtSel": "", }, "UPI_LL.RxL_CREDITS_CONSUMED_VNA": { "Box": "UPI_LL", "Category": "UPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VNA Credit Consumed", "EvSel": 56, "ExtSel": "", }, "UPI_LL.RxL_FLITS": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. 
Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", }, "UPI_LL.RxL_FLITS.SLOT1": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxxxxxx1x", }, "UPI_LL.RxL_FLITS.NON_DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "b10010111", }, "UPI_LL.RxL_FLITS.DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. 
Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxxxx1xxx", }, "UPI_LL.RxL_FLITS.LLCTRL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bx1xxxxxx", }, "UPI_LL.RxL_FLITS.ALL_DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "b00001111", }, "UPI_LL.RxL_FLITS.SLOT2": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. 
Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxxxxx1xx", }, "UPI_LL.RxL_FLITS.PROTHDR": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "b1xxxxxxx", }, "UPI_LL.RxL_FLITS.ALL_NULL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "b00100111", }, "UPI_LL.RxL_FLITS.IDLE": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. 
Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "b01000111", }, "UPI_LL.RxL_FLITS.NULL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxx1xxxxx", }, "UPI_LL.RxL_FLITS.SLOT0": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxxxxxxx1", }, "UPI_LL.RxL_FLITS.LLCRD": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 3, "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxxx1xxxx", }, "UPI_LL.RxL_INSERTS": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "RxQ Flit Buffer Allocations", "EvSel": 48, "ExtSel": "", "MaxIncCyc": 3, }, "UPI_LL.RxL_INSERTS.SLOT1": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "RxQ Flit Buffer Allocations", "EvSel": 48, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxx1x", }, "UPI_LL.RxL_INSERTS.SLOT0": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "RxQ Flit Buffer Allocations", "EvSel": 48, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxxx1", }, "UPI_LL.RxL_INSERTS.SLOT2": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "RxQ Flit Buffer Allocations", "EvSel": 48, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxx1xx", }, "UPI_LL.RxL_OCCUPANCY": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 50, "ExtSel": "", "MaxIncCyc": 128, }, "UPI_LL.RxL_OCCUPANCY.SLOT2": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 50, "ExtSel": "", "MaxIncCyc": 128, "Umask": "bxxxxx1xx", }, "UPI_LL.RxL_OCCUPANCY.SLOT1": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the UPI RxQ in each cycle. 
Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 50, "ExtSel": "", "MaxIncCyc": 128, "Umask": "bxxxxxx1x", }, "UPI_LL.RxL_OCCUPANCY.SLOT0": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 50, "ExtSel": "", "MaxIncCyc": 128, "Umask": "bxxxxxxx1", }, "UPI_LL.RxL_SLOT_BYPASS": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", }, "UPI_LL.RxL_SLOT_BYPASS.S2_RXQ1": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxx1xxxxx", }, "UPI_LL.RxL_SLOT_BYPASS.S0_RXQ1": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.RxL_SLOT_BYPASS.S1_RXQ2": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.RxL_SLOT_BYPASS.S2_RXQ0": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxxx1xxxx", }, 
"UPI_LL.RxL_SLOT_BYPASS.S1_RXQ0": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.RxL_SLOT_BYPASS.S0_RXQ2": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.TxL0P_CLK_ACTIVE": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", }, "UPI_LL.TxL0P_CLK_ACTIVE.SPARE": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "b1xxxxxxx", }, "UPI_LL.TxL0P_CLK_ACTIVE.TXQ": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UPI_LL.TxL0P_CLK_ACTIVE.CFG_CTL": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.TxL0P_CLK_ACTIVE.RXQ_CRED": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.TxL0P_CLK_ACTIVE.RXQ": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.TxL0P_CLK_ACTIVE.RXQ_BYPASS": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.TxL0P_CLK_ACTIVE.RETRY": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxx1xxxxx", }, "UPI_LL.TxL0P_CLK_ACTIVE.DFX": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bx1xxxxxx", }, "UPI_LL.TxL0P_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of UPI qfclk cycles spent in L0p power mode. 
L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p", "EvSel": 39, "ExtSel": "", "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "UPI_LL.TxL0P_POWER_CYCLES_LL_ENTER": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 40, "ExtSel": "", }, "UPI_LL.TxL0P_POWER_CYCLES_M3_EXIT": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 41, "ExtSel": "", }, "UPI_LL.TxL0_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0", "EvSel": 38, "ExtSel": "", "Notes": "Includes L0p cycles. To get just L0, subtract TxL0P_POWER_CYCLES", }, "UPI_LL.TxL_BASIC_HDR_MATCH": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. 
Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSP_DATA_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1100", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.WB": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. 
Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1101", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bXXXX1010", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.WB_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1101", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSPCNFLT": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. 
Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "b10101010", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.NCB": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1110", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSPI": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. 
Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "b00101010", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.NCS_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1111", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.REQ_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. 
See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1000", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.SNP": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1001", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSP_DATA": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1100", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.NCB_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1110", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.REQ": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. 
Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1000", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.SNP_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1001", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSP_NODATA": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. 
Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1010", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.NCS": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Defn": """Matches on Transmit path of a UPI port. Match based on UMask specific bits: Z: Message Class (3-bit) Y: Message Class Enable W: Opcode (4-bit) V: Opcode Enable U: Local Enable T: Remote Enable S: Data Hdr Enable R: Non-Data Hdr Enable Q: Dual Slot Hdr Enable P: Single Slot Hdr Enable Link Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases. Note: If Message Class is disabled, we expect opcode to also be disabled.""", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "MaxIncCyc": 3, "Notes": "This event is subject to finer grain filtering. See doc for more information. Components (message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bxxxx1111", "UmaskExt": 0x0, }, "UPI_LL.TxL_BYPASSED": { "Box": "UPI_LL", "Category": "UPI_LL TXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the UPI Link. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.", "Desc": "Tx Flit Buffer Bypassed", "EvSel": 65, "ExtSel": "", }, "UPI_LL.TxL_FLITS": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", }, "UPI_LL.TxL_FLITS.IDLE": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "b01000111", }, "UPI_LL.TxL_FLITS.SLOT0": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxxxxxxx1", }, "UPI_LL.TxL_FLITS.NULL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": 
"You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxx1xxxxx", }, "UPI_LL.TxL_FLITS.LLCRD": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxxx1xxxx", }, "UPI_LL.TxL_FLITS.SLOT2": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxxxxx1xx", }, "UPI_LL.TxL_FLITS.ALL_DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "b00001111", }, "UPI_LL.TxL_FLITS.PROTHDR": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "b1xxxxxxx", }, "UPI_LL.TxL_FLITS.ALL_NULL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", 
"EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "b00100111", }, "UPI_LL.TxL_FLITS.NON_DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "b10010111", }, "UPI_LL.TxL_FLITS.LLCTRL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bx1xxxxxx", }, "UPI_LL.TxL_FLITS.DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxxxx1xxx", }, "UPI_LL.TxL_FLITS.SLOT1": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 3, "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxxxxxx1x", }, "UPI_LL.TxL_INSERTS": { "Box": "UPI_LL", "Category": "UPI_LL TXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the UPI Tx Flit 
Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "Tx Flit Buffer Allocations", "EvSel": 64, "ExtSel": "", }, "UPI_LL.TxL_OCCUPANCY": { "Box": "UPI_LL", "Category": "UPI_LL TXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.", "Desc": "Tx Flit Buffer Occupancy", "EvSel": 66, "ExtSel": "", "MaxIncCyc": 32, }, "UPI_LL.VNA_CREDIT_RETURN_BLOCKED_VN01": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 69, "ExtSel": "", }, "UPI_LL.VNA_CREDIT_RETURN_OCCUPANCY": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "Defn": "Number of VNA credits in the Rx side that are waiting to be returned back across the link.", "Desc": "VNA Credits Pending Return - Occupancy", "EvSel": 68, "ExtSel": "", "MaxIncCyc": 128, }, # CHA: "CHA.AG0_AD_CRD_ACQUIRED0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "CHA.AG0_AD_CRD_ACQUIRED0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per 
transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "CHA.AG0_AD_CRD_ACQUIRED0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "CHA.AG0_AD_CRD_ACQUIRED0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "CHA.AG0_AD_CRD_ACQUIRED0.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "b1xxxxxxx", }, "CHA.AG0_AD_CRD_ACQUIRED0.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bx1xxxxxx", }, "CHA.AG0_AD_CRD_ACQUIRED0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, 
"CHA.AG0_AD_CRD_ACQUIRED0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "CHA.AG0_AD_CRD_ACQUIRED0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "CHA.AG0_AD_CRD_ACQUIRED1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", }, "CHA.AG0_AD_CRD_ACQUIRED1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "CHA.AG0_AD_CRD_ACQUIRED1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. 
Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "CHA.AG0_AD_CRD_ACQUIRED1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "CHA.AG0_AD_CRD_OCCUPANCY0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "CHA.AG0_AD_CRD_OCCUPANCY0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000001", }, "CHA.AG0_AD_CRD_OCCUPANCY0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000010", }, "CHA.AG0_AD_CRD_OCCUPANCY0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000100", }, "CHA.AG0_AD_CRD_OCCUPANCY0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given 
cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00010000", }, "CHA.AG0_AD_CRD_OCCUPANCY0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00100000", }, "CHA.AG0_AD_CRD_OCCUPANCY0.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b01000000", }, "CHA.AG0_AD_CRD_OCCUPANCY0.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b10000000", }, "CHA.AG0_AD_CRD_OCCUPANCY0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00001000", }, "CHA.AG0_AD_CRD_OCCUPANCY1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", }, "CHA.AG0_AD_CRD_OCCUPANCY1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000001", }, "CHA.AG0_AD_CRD_OCCUPANCY1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000100", }, "CHA.AG0_AD_CRD_OCCUPANCY1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", "Umask": "b00000010", }, "CHA.AG0_BL_CRD_ACQUIRED0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", }, "CHA.AG0_BL_CRD_ACQUIRED0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.AG0_BL_CRD_ACQUIRED0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.AG0_BL_CRD_ACQUIRED0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.AG0_BL_CRD_ACQUIRED0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.AG0_BL_CRD_ACQUIRED0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.AG0_BL_CRD_ACQUIRED0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxx1xxx", }, 
"CHA.AG0_BL_CRD_ACQUIRED0.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.AG0_BL_CRD_ACQUIRED0.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.AG0_BL_CRD_ACQUIRED1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "CHA.AG0_BL_CRD_ACQUIRED1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "CHA.AG0_BL_CRD_ACQUIRED1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "CHA.AG0_BL_CRD_ACQUIRED1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "CHA.AG0_BL_CRD_OCCUPANCY0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS 
Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", }, "CHA.AG0_BL_CRD_OCCUPANCY0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00010000", }, "CHA.AG0_BL_CRD_OCCUPANCY0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00100000", }, "CHA.AG0_BL_CRD_OCCUPANCY0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00001000", }, "CHA.AG0_BL_CRD_OCCUPANCY0.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b10000000", }, "CHA.AG0_BL_CRD_OCCUPANCY0.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b01000000", }, "CHA.AG0_BL_CRD_OCCUPANCY0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000010", }, "CHA.AG0_BL_CRD_OCCUPANCY0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": 
"b00000001", }, "CHA.AG0_BL_CRD_OCCUPANCY0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000100", }, "CHA.AG0_BL_CRD_OCCUPANCY1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "CHA.AG0_BL_CRD_OCCUPANCY1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000100", }, "CHA.AG0_BL_CRD_OCCUPANCY1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000010", }, "CHA.AG0_BL_CRD_OCCUPANCY1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000001", }, "CHA.AG1_AD_CRD_ACQUIRED0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "CHA.AG1_AD_CRD_ACQUIRED0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD 
credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "CHA.AG1_AD_CRD_ACQUIRED0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "CHA.AG1_AD_CRD_ACQUIRED0.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "b1xxxxxxx", }, "CHA.AG1_AD_CRD_ACQUIRED0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "CHA.AG1_AD_CRD_ACQUIRED0.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bx1xxxxxx", }, "CHA.AG1_AD_CRD_ACQUIRED0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": 
"bxxxxxx1x", }, "CHA.AG1_AD_CRD_ACQUIRED0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "CHA.AG1_AD_CRD_ACQUIRED0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "CHA.AG1_AD_CRD_ACQUIRED1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", }, "CHA.AG1_AD_CRD_ACQUIRED1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "CHA.AG1_AD_CRD_ACQUIRED1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. 
Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "CHA.AG1_AD_CRD_ACQUIRED1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "CHA.AG1_AD_CRD_OCCUPANCY0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "CHA.AG1_AD_CRD_OCCUPANCY0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00100000", }, "CHA.AG1_AD_CRD_OCCUPANCY0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00010000", }, "CHA.AG1_AD_CRD_OCCUPANCY0.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b10000000", }, "CHA.AG1_AD_CRD_OCCUPANCY0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given 
cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00001000", }, "CHA.AG1_AD_CRD_OCCUPANCY0.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b01000000", }, "CHA.AG1_AD_CRD_OCCUPANCY0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000010", }, "CHA.AG1_AD_CRD_OCCUPANCY0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000001", }, "CHA.AG1_AD_CRD_OCCUPANCY0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000100", }, "CHA.AG1_AD_CRD_OCCUPANCY1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", }, "CHA.AG1_AD_CRD_OCCUPANCY1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000010", }, "CHA.AG1_AD_CRD_OCCUPANCY1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000100", }, "CHA.AG1_AD_CRD_OCCUPANCY1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", "Umask": "b00000001", }, "CHA.AG1_BL_CRD_ACQUIRED0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", }, "CHA.AG1_BL_CRD_ACQUIRED0.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.AG1_BL_CRD_ACQUIRED0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.AG1_BL_CRD_ACQUIRED0.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.AG1_BL_CRD_ACQUIRED0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.AG1_BL_CRD_ACQUIRED0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.AG1_BL_CRD_ACQUIRED0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxx1xx", }, 
"CHA.AG1_BL_CRD_ACQUIRED0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.AG1_BL_CRD_ACQUIRED0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.AG1_BL_CRD_ACQUIRED1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "CHA.AG1_BL_CRD_ACQUIRED1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "CHA.AG1_BL_CRD_ACQUIRED1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "CHA.AG1_BL_CRD_ACQUIRED1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "CHA.AG1_BL_CRD_OCCUPANCY0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS 
Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", }, "CHA.AG1_BL_CRD_OCCUPANCY0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000010", }, "CHA.AG1_BL_CRD_OCCUPANCY0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000001", }, "CHA.AG1_BL_CRD_OCCUPANCY0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000100", }, "CHA.AG1_BL_CRD_OCCUPANCY0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00010000", }, "CHA.AG1_BL_CRD_OCCUPANCY0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00100000", }, "CHA.AG1_BL_CRD_OCCUPANCY0.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b10000000", }, "CHA.AG1_BL_CRD_OCCUPANCY0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": 
"b00001000", }, "CHA.AG1_BL_CRD_OCCUPANCY0.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b01000000", }, "CHA.AG1_BL_CRD_OCCUPANCY1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "CHA.AG1_BL_CRD_OCCUPANCY1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000010", }, "CHA.AG1_BL_CRD_OCCUPANCY1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000100", }, "CHA.AG1_BL_CRD_OCCUPANCY1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000001", }, "CHA.BYPASS_CHA_IMC": { "Box": "CHA", "Category": "CHA HA BYPASS Events", "Defn": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. 
This can be filtered by when the bypass was taken and when it was not.", "Desc": "CHA to iMC Bypass", "EvSel": 87, "ExtSel": "", }, "CHA.BYPASS_CHA_IMC.INTERMEDIATE": { "Box": "CHA", "Category": "CHA HA BYPASS Events", "Defn": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.", "Desc": "CHA to iMC Bypass", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.BYPASS_CHA_IMC.TAKEN": { "Box": "CHA", "Category": "CHA HA BYPASS Events", "Defn": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.", "Desc": "CHA to iMC Bypass", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.BYPASS_CHA_IMC.NOT_TAKEN": { "Box": "CHA", "Category": "CHA HA BYPASS Events", "Defn": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.", "Desc": "CHA to iMC Bypass", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.CLOCKTICKS": { "Box": "CHA", "Category": "CHA UCLK Events", "Counters": "0-3", "Desc": "Clockticks of the uncore caching and home agent (CHA)", "EvSel": 0, "ExtSel": "", }, "CHA.CMS_CLOCKTICKS": { "Box": "CHA", "Category": "CHA Misc Events", "Desc": "CMS Clockticks", "EvSel": 192, "ExtSel": "", }, "CHA.CORE_SNP": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. 
Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", }, "CHA.CORE_SNP.EVICT_GTONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b10000010", }, "CHA.CORE_SNP.ANY_GTONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. 
This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b11110010", }, "CHA.CORE_SNP.ANY_ONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b11110001", }, "CHA.CORE_SNP.CORE_GTONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b01000010", }, "CHA.CORE_SNP.EXT_GTONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. 
For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b00100010", }, "CHA.CORE_SNP.REMOTE_GTONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b00100010", }, "CHA.CORE_SNP.EXT_ONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. 
This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b00100001", }, "CHA.CORE_SNP.EVICT_ONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b10000001", }, "CHA.CORE_SNP.CORE_ONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b01000001", }, "CHA.CORE_SNP.REMOTE_ONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. 
For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b00010001", }, "CHA.COUNTER0_OCCUPANCY": { "Box": "CHA", "Category": "CHA OCCUPANCY Events", "Counters": "0-3", "Defn": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.", "Desc": "Counter 0 Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 20, }, "CHA.DIRECT_GO": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 110, "ExtSel": "", }, "CHA.DIRECT_GO.HA_TOR_DEALLOC": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 110, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.DIRECT_GO.HA_SUPPRESS_DRD": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 110, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.DIRECT_GO.HA_SUPPRESS_NO_D2C": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 110, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.DIRECT_GO_OPC": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 109, "ExtSel": "", }, "CHA.DIRECT_GO_OPC.FAST_GO": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 109, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.DIRECT_GO_OPC.PULL": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 109, 
"ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.DIRECT_GO_OPC.EXTCMP": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.DIRECT_GO_OPC.IDLE_DUE_SUPPRESS": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 109, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.DIRECT_GO_OPC.GO": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.DIRECT_GO_OPC.FAST_GO_PULL": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 109, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.DIRECT_GO_OPC.NOP": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 109, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.DIRECT_GO_OPC.GO_PULL": { "Box": "CHA", "Category": "CHA DIRECT GO Events", "Desc": "Direct GO", "EvSel": 109, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.DIR_LOOKUP": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Multi-socket cacheline directory state lookups", "EvSel": 83, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "CHA.DIR_LOOKUP.NO_SNP": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Multi-socket cacheline directory state lookups", "EvSel": 83, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "CHA.DIR_LOOKUP.SNP": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of transactions that looked up the directory. 
Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Multi-socket cacheline directory state lookups", "EvSel": 83, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "CHA.DIR_UPDATE": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Multi-socket cacheline directory state updates", "EvSel": 84, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "CHA.DIR_UPDATE.HA": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Multi-socket cacheline directory state updates", "EvSel": 84, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "CHA.DIR_UPDATE.TOR": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. 
This can be filtered by directory sets and directory clears.", "Desc": "Multi-socket cacheline directory state updates", "EvSel": 84, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "CHA.DISTRESS_ASSERTED": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", }, "CHA.DISTRESS_ASSERTED.PMM_NONLOCAL": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.DISTRESS_ASSERTED.VERT": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b00000001", }, "CHA.DISTRESS_ASSERTED.DPT_NONLOCAL": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.DISTRESS_ASSERTED.HORZ": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b00000010", }, "CHA.DISTRESS_ASSERTED.DPT_STALL_NOCRD": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.DISTRESS_ASSERTED.DPT_LOCAL": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts 
the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.DISTRESS_ASSERTED.PMM_LOCAL": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.DISTRESS_ASSERTED.DPT_STALL_IV": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.EGRESS_ORDERING": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", }, "CHA.EGRESS_ORDERING.IV_SNOOPGO_DN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.EGRESS_ORDERING.IV_SNOOPGO_UP": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HITME_HIT": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 95, "ExtSel": "", }, "CHA.HITME_HIT.WBMTOE": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 95, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.HITME_HIT.WBMTOI_OR_S": { 
"Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 95, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.HITME_HIT.SHARED_OWNREQ": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 95, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HITME_HIT.EX_RDS": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 95, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HITME_LOOKUP": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 94, "ExtSel": "", }, "CHA.HITME_LOOKUP.WRITE": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HITME_LOOKUP.READ": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HITME_MISS": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Misses in HitMe Cache", "EvSel": 96, "ExtSel": "", }, "CHA.HITME_MISS.NOTSHARED_RDINVOWN": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Misses in HitMe Cache", "EvSel": 96, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.HITME_MISS.SHARED_RDINVOWN": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Misses in HitMe Cache", "EvSel": 96, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.HITME_MISS.READ_OR_INV": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Misses in HitMe Cache", "EvSel": 96, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.HITME_UPDATE": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", }, "CHA.HITME_UPDATE.RDINVOWN": { "Box": "CHA", "Category": "CHA HA HitME Pipe 
Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.HITME_UPDATE.SHARED": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HITME_UPDATE.DEALLOCATE": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HITME_UPDATE.RSPFWDI_REM": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HORZ_RING_AD_IN_USE": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", }, "CHA.HORZ_RING_AD_IN_USE.RIGHT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HORZ_RING_AD_IN_USE.LEFT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HORZ_RING_AD_IN_USE.LEFT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HORZ_RING_AD_IN_USE.RIGHT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.HORZ_RING_AKC_IN_USE": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", }, "CHA.HORZ_RING_AKC_IN_USE.LEFT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HORZ_RING_AKC_IN_USE.LEFT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HORZ_RING_AKC_IN_USE.RIGHT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HORZ_RING_AKC_IN_USE.RIGHT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.HORZ_RING_AK_IN_USE": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", }, "CHA.HORZ_RING_AK_IN_USE.RIGHT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.HORZ_RING_AK_IN_USE.RIGHT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HORZ_RING_AK_IN_USE.LEFT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HORZ_RING_AK_IN_USE.LEFT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HORZ_RING_BL_IN_USE": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", }, "CHA.HORZ_RING_BL_IN_USE.RIGHT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HORZ_RING_BL_IN_USE.LEFT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HORZ_RING_BL_IN_USE.LEFT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HORZ_RING_BL_IN_USE.RIGHT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.HORZ_RING_IV_IN_USE": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", }, "CHA.HORZ_RING_IV_IN_USE.LEFT": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HORZ_RING_IV_IN_USE.RIGHT": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.IMC_READS_COUNT": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Reads Issued", "EvSel": 89, "ExtSel": "", "Notes": "To match the number of reads seen at the IMC, it's necessary to account for any bypasses. IMC_READS_COUNT.* + BYPASS_CHA_IMC.TAKEN == CAS_COUNT.RD", }, "CHA.IMC_READS_COUNT.PRIORITY": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Reads Issued", "EvSel": 89, "ExtSel": "", "Notes": "To match the number of reads seen at the IMC, it's necessary to account for any bypasses. 
IMC_READS_COUNT.* + BYPASS_CHA_IMC.TAKEN == CAS_COUNT.RD", "Umask": "bxxxxxx1x", }, "CHA.IMC_READS_COUNT.NORMAL": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Reads Issued", "EvSel": 89, "ExtSel": "", "Notes": "To match the number of reads seen at the IMC, it's necessary to account for any bypasses. IMC_READS_COUNT.* + BYPASS_CHA_IMC.TAKEN == CAS_COUNT.RD", "Umask": "bxxxxxxx1", }, "CHA.IMC_WRITES_COUNT": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.", "Desc": "CHA to iMC Full Line Writes Issued", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", }, "CHA.IMC_WRITES_COUNT.FULL_PRIORITY": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.", "Desc": "CHA to iMC Full Line Writes Issued", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", "Umask": "bxxxxx1xx", }, "CHA.IMC_WRITES_COUNT.PARTIAL_PRIORITY": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.", "Desc": "CHA to iMC Full Line Writes Issued", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. 
Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", "Umask": "bxxxx1xxx", }, "CHA.IMC_WRITES_COUNT.PARTIAL": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.", "Desc": "CHA to iMC Full Line Writes Issued", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", "Umask": "bxxxxxx1x", }, "CHA.IMC_WRITES_COUNT.FULL": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.", "Desc": "CHA to iMC Full Line Writes Issued", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", "Umask": "bxxxxxxx1", }, "CHA.LLC_LOOKUP": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", }, "CHA.LLC_LOOKUP.RFO": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. 
This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1BC8, }, "CHA.LLC_LOOKUP.RFO_LOCAL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x19C8, }, "CHA.LLC_LOOKUP.FLUSH_INV_REMOTE": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. 
Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1A04, }, "CHA.LLC_LOOKUP.F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b1xxxxxxx", "UmaskExt": "bxxxxxxxxxxxxx", }, "CHA.LLC_LOOKUP.E": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. 
To monitor any lookup, set the field to 0xFF.", "Umask": "bxx1xxxxx", "UmaskExt": "bxxxxxxxxxxxxx", }, "CHA.LLC_LOOKUP.FLUSH_INV_LOCAL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1844, }, "CHA.LLC_LOOKUP.DATA_READ_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxx1", }, "CHA.LLC_LOOKUP.ANY_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. 
This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxx1xxxxx", }, "CHA.LLC_LOOKUP.CODE_READ_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxx1xxxx", }, "CHA.LLC_LOOKUP.SF_E": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. 
Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxxxxxxx", }, "CHA.LLC_LOOKUP.MISS_ALL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b00000001", "UmaskExt": 0x1FE0, }, "CHA.LLC_LOOKUP.RFO_MISS": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. 
To monitor any lookup, set the field to 0xFF.", "Umask": "b00000001", "UmaskExt": 0x1BC8, }, "CHA.LLC_LOOKUP.READ": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1BD9, }, "CHA.LLC_LOOKUP.ALL_REMOTE": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1E20, }, "CHA.LLC_LOOKUP.S": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. 
Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxx1xxxx", "UmaskExt": "bxxxxxxxxxxxxx", }, "CHA.LLC_LOOKUP.READ_MISS_REM_HOM": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b00000001", "UmaskExt": 0x13D9, }, "CHA.LLC_LOOKUP.REMOTE_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. 
Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "b1xxxxxxxxxxxx", }, "CHA.LLC_LOOKUP.READ_SF_HIT": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b00001110", "UmaskExt": 0x1BD9, }, "CHA.LLC_LOOKUP.DATA_READ_MISS": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. 
To monitor any lookup, set the field to 0xFF.", "Umask": "b00000001", "UmaskExt": 0x1BC1, }, "CHA.LLC_LOOKUP.LLCPREF_LOCAL_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxx1xxxxxxx", }, "CHA.LLC_LOOKUP.FLUSH_INV": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1A44, }, "CHA.LLC_LOOKUP.REMOTE_SNOOP_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. 
This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bxx1xxxxxxxxxx", }, "CHA.LLC_LOOKUP.LOCAL_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bx1xxxxxxxxxxx", }, "CHA.LLC_LOOKUP.WRITES_AND_OTHER": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. 
Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1A42, }, "CHA.LLC_LOOKUP.I": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxxxxxxx", }, "CHA.LLC_LOOKUP.CODE_READ_MISS": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. 
To monitor any lookup, set the field to 0xFF.", "Umask": "b00000001", "UmaskExt": 0x1BD0, }, "CHA.LLC_LOOKUP.READ_OR_SNOOP_REMOTE_MISS_REM_HOM": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b00000001", "UmaskExt": 0x1619, }, "CHA.LLC_LOOKUP.M": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bx1xxxxxx", "UmaskExt": "bxxxxxxxxxxxxx", }, "CHA.LLC_LOOKUP.OTHER_REQ_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. 
This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxx1x", }, "CHA.LLC_LOOKUP.SF_H": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxx1xxx", "UmaskExt": "bxxxxxxxxxxxxx", }, "CHA.LLC_LOOKUP.READ_REMOTE_LOC_HOM": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. 
Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x0A19, }, "CHA.LLC_LOOKUP.REMOTE_SNP": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1C19, }, "CHA.LLC_LOOKUP.SF_S": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. 
To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxxxxxxx", }, "CHA.LLC_LOOKUP.DATA_READ": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1BC1, }, "CHA.LLC_LOOKUP.PREF_OR_DMND_REMOTE_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxx1xxxxxxxxx", }, "CHA.LLC_LOOKUP.FLUSH_OR_INV_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. 
This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxx1xx", }, "CHA.LLC_LOOKUP.LLCPREF_LOCAL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x189D, }, "CHA.LLC_LOOKUP.LOC_HOM": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. 
Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x0BDF, }, "CHA.LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxx1xxxxxx", }, "CHA.LLC_LOOKUP.READ_MISS": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. 
To monitor any lookup, set the field to 0xFF.", "Umask": "b00000001", "UmaskExt": 0x1BD9, }, "CHA.LLC_LOOKUP.CODE_READ": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1BD0, }, "CHA.LLC_LOOKUP.READ_MISS_LOC_HOM": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b00000001", "UmaskExt": 0x0BD9, }, "CHA.LLC_LOOKUP.ALL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. 
Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1FFF, }, "CHA.LLC_LOOKUP.DATA_READ_REMOTE": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1A01, }, "CHA.LLC_LOOKUP.READ_LOCAL_REM_HOM": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. 
Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x11D9, }, "CHA.LLC_LOOKUP.DATA_READ_LOCAL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x19C1, }, "CHA.LLC_LOOKUP.REM_HOM": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. 
To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x15DF, }, "CHA.LLC_LOOKUP.CODE_READ_LOCAL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x19D0, }, "CHA.LLC_LOOKUP.READ_LOCAL_LOC_HOM": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x09D9, }, "CHA.LLC_LOOKUP.CODE_READ_REMOTE": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. 
This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1A10, }, "CHA.LLC_LOOKUP.RFO_REMOTE": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "b11111111", "UmaskExt": 0x1A08, }, "CHA.LLC_LOOKUP.RFO_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. 
Otherwise, the event will count nothing.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "One of the bits in the umask, those corresponding to each cacheline state (e.g. bit0 = invalid (miss), bit4 = shared), etc, must always be set for this event. To monitor any lookup, set the field to 0xFF.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxx1xxx", }, "CHA.LLC_VICTIMS": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", }, "CHA.LLC_VICTIMS.REMOTE_E": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00000010", "UmaskExt": "b10000000", }, "CHA.LLC_VICTIMS.LOCAL_E": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00000010", "UmaskExt": "b00100000", }, "CHA.LLC_VICTIMS.ALL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00001111", "UmaskExt": "b00000000", }, "CHA.LLC_VICTIMS.E_STATE": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxx", }, "CHA.LLC_VICTIMS.LOCAL_M": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00000001", "UmaskExt": "b00100000", }, "CHA.LLC_VICTIMS.REMOTE_ALL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00001111", "UmaskExt": "b10000000", }, "CHA.LLC_VICTIMS.REMOTE_ONLY": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "bxxxxxxxx", "UmaskExt": "b1xxxxxxx", }, "CHA.LLC_VICTIMS.REMOTE_M": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00000001", "UmaskExt": "b10000000", }, "CHA.LLC_VICTIMS.LOCAL_S": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00000100", "UmaskExt": "b00100000", }, "CHA.LLC_VICTIMS.LOCAL_ALL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00001111", "UmaskExt": "b00100000", }, "CHA.LLC_VICTIMS.S_STATE": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxx", }, "CHA.LLC_VICTIMS.M_STATE": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxx", }, "CHA.LLC_VICTIMS.LOCAL_ONLY": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "bxxxxxxxx", "UmaskExt": "bxx1xxxxx", }, "CHA.LLC_VICTIMS.REMOTE_S": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00000100", "UmaskExt": "b10000000", }, "CHA.MISC": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", }, "CHA.MISC.WC_ALIASING": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.MISC.CV0_PREF_VIC": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.MISC.RFO_HIT_S": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.MISC.CV0_PREF_MISS": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.MISC.RSPI_WAS_FSE": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.MISC_EXTERNAL": { "Box": "CHA", "Category": "CHA External Misc Events (eg. From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", }, "CHA.MISC_EXTERNAL.MBE_INST1": { "Box": "CHA", "Category": "CHA External Misc Events (eg. From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", "Umask": "bxxxxxx1x", }, "CHA.MISC_EXTERNAL.MBE_INST0": { "Box": "CHA", "Category": "CHA External Misc Events (eg. 
From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", "Umask": "bxxxxxxx1", }, "CHA.OSB": { "Box": "CHA", "Category": "CHA HA OSB Events", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 85, "ExtSel": "", }, "CHA.OSB.RFO_HITS_SNP_BCAST": { "Box": "CHA", "Category": "CHA HA OSB Events", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 85, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.OSB.REMOTE_READ": { "Box": "CHA", "Category": "CHA HA OSB Events", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.OSB.REMOTE_READINVITOE": { "Box": "CHA", "Category": "CHA HA OSB Events", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 85, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.OSB.LOCAL_READ": { "Box": "CHA", "Category": "CHA HA OSB Events", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.OSB.OFF_PWRHEURISTIC": { "Box": "CHA", "Category": "CHA HA OSB Events", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 85, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.OSB.LOCAL_INVITOE": { "Box": "CHA", "Category": "CHA HA OSB Events", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.PIPE_REJECT": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", }, "CHA.PIPE_REJECT.RRQ_SETMATCH_VICP": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxx1xxxxxxxxxxxx", }, "CHA.PIPE_REJECT.SF_WAYS_RES": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxx1xxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.FSF_VICP": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1x", }, "CHA.PIPE_REJECT.ONE_FSF_VIC": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1xx", }, "CHA.PIPE_REJECT.LLC_WAYS_RES": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": 
"bxxxxxxxxxxxxxxxx1xxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.VN_BL_NCB": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxx1xxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.VN_BL_RSP": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxx1xxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.PMM_MEMMODE_TORMATCH_MULTI": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxx1xxxxxxxxxx", }, "CHA.PIPE_REJECT.ISMQ_SETMATCH_VICP": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxx1xxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.NOTALLOWSNOOP": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxx1xxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.VN_AD_REQ": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxx1xxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.SETMATCHENTRYWSCT": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": 
"bxxxxxxxxxxxxxxxxxxxxxx1xxxxxxxxx", }, "CHA.PIPE_REJECT.IRQ_SETMATCH_VICP": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1", }, "CHA.PIPE_REJECT.VN_AD_RSP": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxx1xxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.GOTRACK_WAYMATCH": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxx1xxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.VN_BL_NCS": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxx1xxxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.AKEGRCREDIT": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxx1xxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.IDX_INPIPE": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxx1xxxxxxxx", }, "CHA.PIPE_REJECT.GOTRACK_PAMATCH": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", }, 
"CHA.PIPE_REJECT.GOTRACK_ALLOWSNP": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.VN_BL_WB": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxx1xxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.IPQ_SETMATCH_VICP": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx1xxxxx", }, "CHA.PIPE_REJECT.HACREDIT": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxx1xxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.IVEGRCREDIT": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxx1xxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.IRQ_PMM": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxx1xxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.ALLRSFWAYS_RES": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxx1xxxxxxxxxxx", }, "CHA.PIPE_REJECT.ONE_RSP_CON": { "Box": "CHA", 
"Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxx1xxxxxxx", }, "CHA.PIPE_REJECT.WAY_MATCH": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxx1xxxxxx", }, "CHA.PIPE_REJECT.PRQ_PMM": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bx1xxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.PMM_MEMMODE_TOR_MATCH": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxx1xxx", }, "CHA.PIPE_REJECT.RMW_SETMATCH": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.BLEGRCREDIT": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxx1xxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.GOTRACK_ALLWAYRSV": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxx1xxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.ADEGRCREDIT": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", 
"Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxx1xxxxxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.TOPA_MATCH": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxx1xxxxxxxxxxxxxxxxx", }, "CHA.PIPE_REJECT.TORID_MATCH_GO_P": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxx1xxxx", }, "CHA.PIPE_REJECT.PTL_INPIPE": { "Box": "CHA", "Category": "CHA PIPE REJECT Events", "Counters": "0-3", "Defn": "More Miscellaneous events in the Cbo.", "Desc": "Pipe Rejects", "EvSel": 66, "ExtSel": "", "Umask": "b1xxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.PMM_MEMMODE_NM_INVITOX": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "EvSel": 101, "ExtSel": "", }, "CHA.PMM_MEMMODE_NM_INVITOX.SETCONFLICT": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "EvSel": 101, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.PMM_MEMMODE_NM_INVITOX.LOCAL": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "EvSel": 101, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.PMM_MEMMODE_NM_INVITOX.REMOTE": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "EvSel": 101, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.PMM_MEMMODE_NM_SETCONFLICTS": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "Desc": "PMM Memory Mode related events", "EvSel": 100, "ExtSel": "", }, "CHA.PMM_MEMMODE_NM_SETCONFLICTS.LLC": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "Desc": "PMM Memory Mode related events", "EvSel": 100, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.PMM_MEMMODE_NM_SETCONFLICTS.TOR": { "Box": "CHA", 
"Category": "CHA HA PM MEMMODE Events", "Desc": "PMM Memory Mode related events", "EvSel": 100, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.PMM_MEMMODE_NM_SETCONFLICTS.SF": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "Desc": "PMM Memory Mode related events", "EvSel": 100, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.PMM_MEMMODE_NM_SETCONFLICTS2": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "EvSel": 112, "ExtSel": "", }, "CHA.PMM_MEMMODE_NM_SETCONFLICTS2.IODC": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI": { "Box": "CHA", "Category": "CHA HA PM MEMMODE Events", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.PMM_QOS": { "Box": "CHA", "Category": "CHA HA PMM QOS Events", "EvSel": 102, "ExtSel": "", }, "CHA.PMM_QOS.REJ_IRQ": { "Box": "CHA", "Category": "CHA HA PMM QOS Events", "EvSel": 102, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.PMM_QOS.THROTTLE": { "Box": "CHA", "Category": "CHA HA PMM QOS Events", "EvSel": 102, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.PMM_QOS.THROTTLE_PRQ": { "Box": "CHA", "Category": "CHA HA PMM QOS Events", "EvSel": 102, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.PMM_QOS.SLOWTORQ_SKIP": { "Box": "CHA", "Category": "CHA HA PMM QOS Events", "EvSel": 102, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.PMM_QOS.SLOW_INSERT": { "Box": "CHA", "Category": "CHA HA PMM QOS Events", "EvSel": 102, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.PMM_QOS.THROTTLE_IRQ": { "Box": "CHA", "Category": "CHA HA PMM QOS Events", "EvSel": 102, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.PMM_QOS.DDR4_FAST_INSERT": { "Box": "CHA", "Category": "CHA HA PMM QOS Events", "EvSel": 102, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.PMM_QOS_OCCUPANCY": { "Box": "CHA", "Category": "CHA HA PMM 
QOS Events", "EvSel": 103, "ExtSel": "", }, "CHA.PMM_QOS_OCCUPANCY.DDR_FAST_FIFO": { "Box": "CHA", "Category": "CHA HA PMM QOS Events", "EvSel": 103, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO": { "Box": "CHA", "Category": "CHA HA PMM QOS Events", "EvSel": 103, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.READ_NO_CREDITS": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", }, "CHA.READ_NO_CREDITS.MC4": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxx1xxxx", "UmaskExt": "bxxxxxxxx", }, "CHA.READ_NO_CREDITS.MC3": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxx1xxx", "UmaskExt": "bxxxxxxxx", }, "CHA.READ_NO_CREDITS.MC13": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. 
In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxx1xxxxx", }, "CHA.READ_NO_CREDITS.MC7": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "b1xxxxxxx", "UmaskExt": "bxxxxxxxx", }, "CHA.READ_NO_CREDITS.MC10": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxx1xx", }, "CHA.READ_NO_CREDITS.MC8": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxx1", }, "CHA.READ_NO_CREDITS.MC5": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. 
In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxx1xxxxx", "UmaskExt": "bxxxxxxxx", }, "CHA.READ_NO_CREDITS.MC2": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxx", }, "CHA.READ_NO_CREDITS.MC9": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxx1x", }, "CHA.READ_NO_CREDITS.MC0": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxx", }, "CHA.READ_NO_CREDITS.MC1": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. 
In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxx", }, "CHA.READ_NO_CREDITS.MC11": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxx1xxx", }, "CHA.READ_NO_CREDITS.MC12": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxx1xxxx", }, "CHA.READ_NO_CREDITS.MC6": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bx1xxxxxx", "UmaskExt": "bxxxxxxxx", }, "CHA.REQUESTS": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "HA Read and Write Requests", "EvSel": 80, "ExtSel": "", }, "CHA.REQUESTS.INVITOE_LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "HA Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.REQUESTS.WRITES_LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "HA Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.REQUESTS.INVITOE_REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "HA Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.REQUESTS.READS_REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "HA Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.REQUESTS.WRITES": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "HA Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "b00001100", }, "CHA.REQUESTS.READS_LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "HA Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.REQUESTS.INVITOE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "HA Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "b00110000", }, "CHA.REQUESTS.WRITES_REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "HA Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.REQUESTS.READS": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "HA Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "b00000011", }, "CHA.RING_BOUNCES_HORZ": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", }, "CHA.RING_BOUNCES_HORZ.AK": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RING_BOUNCES_HORZ.BL": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RING_BOUNCES_HORZ.IV": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RING_BOUNCES_HORZ.AD": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RING_BOUNCES_VERT": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, }, "CHA.RING_BOUNCES_VERT.AD": { "Box": "CHA", "Category": "CHA Vertical RING Events", 
"Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "CHA.RING_BOUNCES_VERT.IV": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "CHA.RING_BOUNCES_VERT.BL": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "CHA.RING_BOUNCES_VERT.AKC": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "CHA.RING_BOUNCES_VERT.AK": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "CHA.RING_SINK_STARVED_HORZ": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", }, "CHA.RING_SINK_STARVED_HORZ.AD": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RING_SINK_STARVED_HORZ.IV": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": 
"bxxxx1xxx", }, "CHA.RING_SINK_STARVED_HORZ.BL": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RING_SINK_STARVED_HORZ.AK_AG1": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RING_SINK_STARVED_HORZ.AK": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RING_SINK_STARVED_VERT": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", }, "CHA.RING_SINK_STARVED_VERT.AKC": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RING_SINK_STARVED_VERT.AK": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RING_SINK_STARVED_VERT.IV": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RING_SINK_STARVED_VERT.BL": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RING_SINK_STARVED_VERT.AD": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RING_SRC_THRTL": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Source Throttle", "EvSel": 174, "ExtSel": "", }, "CHA.RxC_INSERTS": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": 
"Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", }, "CHA.RxC_INSERTS.RRQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bx1xxxxxx", }, "CHA.RxC_INSERTS.IPQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxx1xx", }, "CHA.RxC_INSERTS.PRQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxx1xxxx", }, "CHA.RxC_INSERTS.PRQ_REJ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxx1xxxxx", }, "CHA.RxC_INSERTS.IRQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxxx1", }, "CHA.RxC_INSERTS.IRQ_REJ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", 
"Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxx1x", }, "CHA.RxC_INSERTS.WBQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "b1xxxxxxx", }, "CHA.RxC_IPQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 0", "EvSel": 34, "ExtSel": "", }, "CHA.RxC_IPQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 0", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_IPQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 0", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_IPQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 0", "EvSel": 34, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_IPQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 0", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_IPQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 0", "EvSel": 34, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_IPQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 0", "EvSel": 34, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_IPQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 0", "EvSel": 34, "ExtSel": "", "Umask": 
"bxxxx1xxx", }, "CHA.RxC_IPQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 0", "EvSel": 34, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_IPQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 1", "EvSel": 35, "ExtSel": "", }, "CHA.RxC_IPQ1_REJECT.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 1", "EvSel": 35, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_IPQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 1", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_IPQ1_REJECT.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 1", "EvSel": 35, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_IPQ1_REJECT.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 1", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_IPQ1_REJECT.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 1", "EvSel": 35, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_IPQ1_REJECT.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 1", "EvSel": 35, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_IPQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 1", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_IPQ1_REJECT.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IPQ Requests (from CMS) Rejected - Set 1", "EvSel": 35, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_IRQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ 
Requests (from CMS) Rejected - Set 0", "EvSel": 24, "ExtSel": "", }, "CHA.RxC_IRQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 0", "EvSel": 24, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_IRQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 0", "EvSel": 24, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_IRQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 0", "EvSel": 24, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_IRQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 0", "EvSel": 24, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_IRQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 0", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_IRQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 0", "EvSel": 24, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_IRQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 0", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_IRQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 0", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_IRQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 1", "EvSel": 25, "ExtSel": "", }, "CHA.RxC_IRQ1_REJECT.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 1", "EvSel": 25, "ExtSel": "", "Umask": "b1xxxxxxx", }, 
"CHA.RxC_IRQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 1", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_IRQ1_REJECT.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 1", "EvSel": 25, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_IRQ1_REJECT.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 1", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_IRQ1_REJECT.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 1", "EvSel": 25, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_IRQ1_REJECT.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 1", "EvSel": 25, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_IRQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 1", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_IRQ1_REJECT.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "IRQ Requests (from CMS) Rejected - Set 1", "EvSel": 25, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_ISMQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 0", "EvSel": 36, "ExtSel": "", }, "CHA.RxC_ISMQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. 
Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 0", "EvSel": 36, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_ISMQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 0", "EvSel": 36, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_ISMQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 0", "EvSel": 36, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_ISMQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 0", "EvSel": 36, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_ISMQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 0", "EvSel": 36, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_ISMQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 0", "EvSel": 36, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_ISMQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 0", "EvSel": 36, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_ISMQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 0", "EvSel": 36, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_ISMQ0_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 0", "EvSel": 44, "ExtSel": "", }, "CHA.RxC_ISMQ0_RETRY.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 0", "EvSel": 44, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_ISMQ0_RETRY.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 0", "EvSel": 44, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_ISMQ0_RETRY.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 0", "EvSel": 44, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_ISMQ0_RETRY.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 0", "EvSel": 44, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_ISMQ0_RETRY.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 0", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_ISMQ0_RETRY.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 0", "EvSel": 44, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_ISMQ0_RETRY.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 0", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_ISMQ0_RETRY.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 0", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_ISMQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 1", "EvSel": 37, "ExtSel": "", }, "CHA.RxC_ISMQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 1", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_ISMQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects - Set 1", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_ISMQ1_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 1", "EvSel": 45, "ExtSel": "", }, "CHA.RxC_ISMQ1_RETRY.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 1", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_ISMQ1_RETRY.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries - Set 1", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_OCCUPANCY": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress (from CMS) Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", }, "CHA.RxC_OCCUPANCY.IRQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress (from CMS) Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "b00000001", }, "CHA.RxC_OCCUPANCY.WBQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress (from CMS) Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with 
the other umasks.", "Umask": "b10000000", }, "CHA.RxC_OCCUPANCY.RRQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress (from CMS) Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "b01000000", }, "CHA.RxC_OCCUPANCY.IPQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress (from CMS) Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "b00000100", }, "CHA.RxC_OTHER0_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 0", "EvSel": 46, "ExtSel": "", }, "CHA.RxC_OTHER0_RETRY.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 0", "EvSel": 46, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_OTHER0_RETRY.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 0", "EvSel": 46, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_OTHER0_RETRY.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 0", "EvSel": 46, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_OTHER0_RETRY.AD_REQ_VN0": { "Box": 
"CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 0", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_OTHER0_RETRY.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 0", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_OTHER0_RETRY.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 0", "EvSel": 46, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_OTHER0_RETRY.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 0", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_OTHER0_RETRY.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 0", "EvSel": 46, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_OTHER1_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 1", "EvSel": 47, "ExtSel": "", }, "CHA.RxC_OTHER1_RETRY.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the 
next reject)", "Desc": "Other Retries - Set 1", "EvSel": 47, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_OTHER1_RETRY.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 1", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_OTHER1_RETRY.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 1", "EvSel": 47, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_OTHER1_RETRY.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 1", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_OTHER1_RETRY.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 1", "EvSel": 47, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_OTHER1_RETRY.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 1", "EvSel": 47, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_OTHER1_RETRY.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 1", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_OTHER1_RETRY.ALLOW_SNP": { "Box": "CHA", 
"Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries - Set 1", "EvSel": 47, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_PRQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 0", "EvSel": 32, "ExtSel": "", }, "CHA.RxC_PRQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 0", "EvSel": 32, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_PRQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 0", "EvSel": 32, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_PRQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 0", "EvSel": 32, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_PRQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 0", "EvSel": 32, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_PRQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 0", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_PRQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 0", "EvSel": 32, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_PRQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 0", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_PRQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 0", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_PRQ1_REJECT": { 
"Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 1", "EvSel": 33, "ExtSel": "", }, "CHA.RxC_PRQ1_REJECT.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 1", "EvSel": 33, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_PRQ1_REJECT.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 1", "EvSel": 33, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_PRQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 1", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_PRQ1_REJECT.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 1", "EvSel": 33, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_PRQ1_REJECT.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 1", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_PRQ1_REJECT.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 1", "EvSel": 33, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_PRQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 1", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_PRQ1_REJECT.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "PRQ Requests (from CMS) Rejected - Set 1", "EvSel": 33, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_REQ_Q0_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 0", "EvSel": 42, "ExtSel": "", }, "CHA.RxC_REQ_Q0_RETRY.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", 
"Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 0", "EvSel": 42, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_REQ_Q0_RETRY.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 0", "EvSel": 42, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_REQ_Q0_RETRY.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 0", "EvSel": 42, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_REQ_Q0_RETRY.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 0", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_REQ_Q0_RETRY.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 0", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_REQ_Q0_RETRY.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 0", "EvSel": 42, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_REQ_Q0_RETRY.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 0", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_REQ_Q0_RETRY.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request 
Queue Retries - Set 0", "EvSel": 42, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_REQ_Q1_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 1", "EvSel": 43, "ExtSel": "", }, "CHA.RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 1", "EvSel": 43, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_REQ_Q1_RETRY.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 1", "EvSel": 43, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_REQ_Q1_RETRY.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 1", "EvSel": 43, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_REQ_Q1_RETRY.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 1", "EvSel": 43, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_REQ_Q1_RETRY.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 1", "EvSel": 43, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_REQ_Q1_RETRY.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 1", "EvSel": 43, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_REQ_Q1_RETRY.VICTIM": { "Box": "CHA", "Category": "CHA 
INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 1", "EvSel": 43, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_REQ_Q1_RETRY.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries - Set 1", "EvSel": 43, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_RRQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 0", "EvSel": 38, "ExtSel": "", }, "CHA.RxC_RRQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 0", "EvSel": 38, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_RRQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 0", "EvSel": 38, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_RRQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 0", "EvSel": 38, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_RRQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 0", "EvSel": 38, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_RRQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) 
had to retry.", "Desc": "RRQ Rejects - Set 0", "EvSel": 38, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_RRQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 0", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_RRQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 0", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_RRQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 0", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_RRQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 1", "EvSel": 39, "ExtSel": "", }, "CHA.RxC_RRQ1_REJECT.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 1", "EvSel": 39, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_RRQ1_REJECT.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 1", "EvSel": 39, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_RRQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 1", "EvSel": 39, "ExtSel": "", "Umask": "bxxxxxx1x", }, 
"CHA.RxC_RRQ1_REJECT.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 1", "EvSel": 39, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_RRQ1_REJECT.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 1", "EvSel": 39, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_RRQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 1", "EvSel": 39, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_RRQ1_REJECT.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 1", "EvSel": 39, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_RRQ1_REJECT.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects - Set 1", "EvSel": 39, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_WBQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 0", "EvSel": 40, "ExtSel": "", }, "CHA.RxC_WBQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 0", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_WBQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a 
transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 0", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_WBQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 0", "EvSel": 40, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_WBQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 0", "EvSel": 40, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_WBQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 0", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_WBQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 0", "EvSel": 40, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_WBQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 0", "EvSel": 40, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_WBQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 0", "EvSel": 40, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_WBQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 1", "EvSel": 41, 
"ExtSel": "", }, "CHA.RxC_WBQ1_REJECT.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 1", "EvSel": 41, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_WBQ1_REJECT.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 1", "EvSel": 41, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_WBQ1_REJECT.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 1", "EvSel": 41, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_WBQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 1", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_WBQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 1", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_WBQ1_REJECT.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 1", "EvSel": 41, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_WBQ1_REJECT.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 1", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_WBQ1_REJECT.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of 
times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects - Set 1", "EvSel": 41, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxR_BUSY_STARVED": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", }, "CHA.RxR_BUSY_STARVED.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "CHA.RxR_BUSY_STARVED.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "CHA.RxR_BUSY_STARVED.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "CHA.RxR_BUSY_STARVED.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "CHA.RxR_BUSY_STARVED.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "CHA.RxR_BUSY_STARVED.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. 
This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "CHA.RxR_BYPASS": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "CHA.RxR_BYPASS.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "CHA.RxR_BYPASS.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "CHA.RxR_BYPASS.AK": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "CHA.RxR_BYPASS.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "CHA.RxR_BYPASS.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "CHA.RxR_BYPASS.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "CHA.RxR_BYPASS.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "CHA.RxR_BYPASS.AKC_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "CHA.RxR_BYPASS.IV": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "CHA.RxR_CRD_STARVED": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", }, "CHA.RxR_CRD_STARVED.AK": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "CHA.RxR_CRD_STARVED.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "CHA.RxR_CRD_STARVED.IFV": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "CHA.RxR_CRD_STARVED.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "CHA.RxR_CRD_STARVED.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "CHA.RxR_CRD_STARVED.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "CHA.RxR_CRD_STARVED.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "CHA.RxR_CRD_STARVED.IV": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "CHA.RxR_CRD_STARVED.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "CHA.RxR_CRD_STARVED_1": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 228, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. 
Selecting multiple ring types NOT supported", }, "CHA.RxR_INSERTS": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "CHA.RxR_INSERTS.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "CHA.RxR_INSERTS.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "CHA.RxR_INSERTS.AKC_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "CHA.RxR_INSERTS.IV": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "CHA.RxR_INSERTS.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "CHA.RxR_INSERTS.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "CHA.RxR_INSERTS.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "CHA.RxR_INSERTS.AK": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "CHA.RxR_INSERTS.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "CHA.RxR_OCCUPANCY": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "CHA.RxR_OCCUPANCY.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00100000", }, "CHA.RxR_OCCUPANCY.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "CHA.RxR_OCCUPANCY.AK": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "CHA.RxR_OCCUPANCY.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "CHA.RxR_OCCUPANCY.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "CHA.RxR_OCCUPANCY.AKC_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "CHA.RxR_OCCUPANCY.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "CHA.RxR_OCCUPANCY.IV": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "CHA.RxR_OCCUPANCY.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "CHA.SF_EVICTION": { "Box": "CHA", "Category": "CHA CACHE Events", "Defn": "Counts number of times a snoop filter entry was evicted, due to lack of space, and replaced with a new entry.", "Desc": "Snoop Filter Capacity Evictions", "EvSel": 61, "ExtSel": "", "Notes": "For cache lines this CHA has some responsibility for managing, the snoop filter tracks their state in the Cores. Does not count clean evictions", }, "CHA.SF_EVICTION.S_STATE": { "Box": "CHA", "Category": "CHA CACHE Events", "Defn": "Counts number of times a snoop filter entry was evicted, due to lack of space, and replaced with a new entry.", "Desc": "Snoop Filter Capacity Evictions", "EvSel": 61, "ExtSel": "", "Notes": "For cache lines this CHA has some responsibility for managing, the snoop filter tracks their state in the Cores. Does not count clean evictions", "Umask": "bxxxxx1xx", }, "CHA.SF_EVICTION.M_STATE": { "Box": "CHA", "Category": "CHA CACHE Events", "Defn": "Counts number of times a snoop filter entry was evicted, due to lack of space, and replaced with a new entry.", "Desc": "Snoop Filter Capacity Evictions", "EvSel": 61, "ExtSel": "", "Notes": "For cache lines this CHA has some responsibility for managing, the snoop filter tracks their state in the Cores. Does not count clean evictions", "Umask": "bxxxxxxx1", }, "CHA.SF_EVICTION.E_STATE": { "Box": "CHA", "Category": "CHA CACHE Events", "Defn": "Counts number of times a snoop filter entry was evicted, due to lack of space, and replaced with a new entry.", "Desc": "Snoop Filter Capacity Evictions", "EvSel": 61, "ExtSel": "", "Notes": "For cache lines this CHA has some responsibility for managing, the snoop filter tracks their state in the Cores. 
Does not count clean evictions", "Umask": "bxxxxxx1x", }, "CHA.SNOOPS_SENT": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", }, "CHA.SNOOPS_SENT.DIRECT_REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.SNOOPS_SENT.LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.SNOOPS_SENT.REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.SNOOPS_SENT.DIRECT_LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.SNOOPS_SENT.ALL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.SNOOPS_SENT.BCST_LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.SNOOPS_SENT.BCST_REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.SNOOP_RESP": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. 
In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", }, "CHA.SNOOP_RESP.RSPS": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.SNOOP_RESP.RSPFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.SNOOP_RESP.RSPCNFLCT": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.SNOOP_RESP.RSPSFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.SNOOP_RESP.RSPWB": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.SNOOP_RESP.RSPIFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.SNOOP_RESP.RSPI": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.SNOOP_RESP.RSPFWDWB": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.SNOOP_RESP_LOCAL": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", }, "CHA.SNOOP_RESP_LOCAL.RSPIFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.SNOOP_RESP_LOCAL.RSPWB": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.SNOOP_RESP_LOCAL.RSPFWDWB": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.SNOOP_RESP_LOCAL.RSPI": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.SNOOP_RESP_LOCAL.RSPS": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.SNOOP_RESP_LOCAL.RSPFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.SNOOP_RESP_LOCAL.RSPSFWD": { "Box": "CHA", 
"Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.SNOOP_RESP_LOCAL.RSPCNFLCT": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.SNOOP_RSP_MISC": { "Box": "CHA", "Category": "CHA CBO SNOOP RESPONSE Events", "Desc": "Misc Snoop Responses Received", "EvSel": 107, "ExtSel": "", }, "CHA.SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC": { "Box": "CHA", "Category": "CHA CBO SNOOP RESPONSE Events", "Desc": "Misc Snoop Responses Received", "EvSel": 107, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.SNOOP_RSP_MISC.MTOI_RSPDATAM": { "Box": "CHA", "Category": "CHA CBO SNOOP RESPONSE Events", "Desc": "Misc Snoop Responses Received", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF": { "Box": "CHA", "Category": "CHA CBO SNOOP RESPONSE Events", "Desc": "Misc Snoop Responses Received", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.SNOOP_RSP_MISC.PULLDATAPTL_HITSF": { "Box": "CHA", "Category": "CHA CBO SNOOP RESPONSE Events", "Desc": "Misc Snoop Responses Received", "EvSel": 107, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.SNOOP_RSP_MISC.MTOI_RSPIFWDM": { "Box": "CHA", "Category": "CHA CBO SNOOP RESPONSE Events", "Desc": "Misc Snoop Responses Received", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.SNOOP_RSP_MISC.PULLDATAPTL_HITLLC": { "Box": "CHA", "Category": "CHA CBO SNOOP RESPONSE Events", "Desc": "Misc Snoop Responses Received", "EvSel": 107, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per 
transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit 
to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled 
waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of 
cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit 
Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2": { "Box": "CHA", "Category": "CHA 
CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "b1xxxxxxx", }, 
"CHA.STALL1_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "CHA.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "CHA.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "CHA.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "CHA.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "CHA.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 
Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "CHA.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "CHA.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "CHA.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "CHA.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "CHA.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL 
Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "CHA.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "CHA.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "CHA.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "CHA.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "CHA.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": 
"bxxxxxxx1", }, "CHA.TOR_INSERTS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", }, "CHA.TOR_INSERTS.ISOC": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "b1xxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IO_MISS_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC803FE, }, "CHA.TOR_INSERTS.MMCFG": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxx1xxxxx", }, "CHA.TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86E86, }, "CHA.TOR_INSERTS.IA_DRD_OPT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC827FF, }, "CHA.TOR_INSERTS.IO_HIT_PCIRDCUR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. 
The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC8F3FD, }, "CHA.TOR_INSERTS.LOC_IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC000FF, }, "CHA.TOR_INSERTS.IO_MISS_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xCC43FE, }, "CHA.TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC88F7E, }, "CHA.TOR_INSERTS.IA_LLCPREFRFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCCC7FF, }, "CHA.TOR_INSERTS.IA_MISS_DRD_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC897FE, }, "CHA.TOR_INSERTS.IA_MISS_DRD_LOCAL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC816FE, }, "CHA.TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86686, }, "CHA.TOR_INSERTS.IA_MISS_WCILF_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8678A, }, "CHA.TOR_INSERTS.IO_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xCC43FF, }, "CHA.TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC81686, }, "CHA.TOR_INSERTS.IA_HIT_DRD_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC897FD, }, "CHA.TOR_INSERTS.NONCOH": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bx1xxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IA_HIT_CRD_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC88FFD, }, "CHA.TOR_INSERTS.IA_HIT_LLCPREFCODE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCCCFFD, }, "CHA.TOR_INSERTS.IA_MISS_RFO_REMOTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8077E, }, "CHA.TOR_INSERTS.MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxx1x", }, "CHA.TOR_INSERTS.IA_ITOMCACHENEAR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCD47FF, }, "CHA.TOR_INSERTS.IA_SPECITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCC57FF, }, "CHA.TOR_INSERTS.IO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC001FF, }, "CHA.TOR_INSERTS.IO_HIT_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC803FD, }, "CHA.TOR_INSERTS.IA_MISS_LLCPREFDATA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCCD7FE, }, "CHA.TOR_INSERTS.IA_LLCPREFDATA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCCD7FF, }, "CHA.TOR_INSERTS.IA_LLCPREFCODE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCCCFFF, }, "CHA.TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC89686, }, "CHA.TOR_INSERTS.IRQ_NON_IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxx1xxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IPQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxx1xxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IA_MISS_DRD_OPT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC827FE, }, "CHA.TOR_INSERTS.IA_MISS_CRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC80FFE, }, "CHA.TOR_INSERTS.IO_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC803FF, }, "CHA.TOR_INSERTS.IA_MISS_DRD_REMOTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8177E, }, "CHA.TOR_INSERTS.IA_HIT_DRD_OPT_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8A7FD, }, "CHA.TOR_INSERTS.IA_DRD_OPT_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8A7FF, }, "CHA.TOR_INSERTS.IO_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC001FD, }, "CHA.TOR_INSERTS.IO_PCIRDCUR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC8F3FF, }, "CHA.TOR_INSERTS.LOC_ALL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000101", "UmaskExt": 0xC000FF, }, "CHA.TOR_INSERTS.PREMORPH_OPC": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxXXXXXXXXXXX1xxxxxxxxxx", }, "CHA.TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8168A, }, "CHA.TOR_INSERTS.IO_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC001FE, }, "CHA.TOR_INSERTS.IRQ_IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC81706, }, "CHA.TOR_INSERTS.IA_MISS_LLCPREFCODE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCCCFFE, }, "CHA.TOR_INSERTS.PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxx1xxx", }, "CHA.TOR_INSERTS.IA_DRDPTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC837FF, }, "CHA.TOR_INSERTS.NOT_NEARMEM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxx1xxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IA_MISS_DRD_PREF_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8978A, }, "CHA.TOR_INSERTS.PRQ_NON_IOSF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxx1xxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IA_HIT_CRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC80FFD, }, "CHA.TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86F06, }, "CHA.TOR_INSERTS.IA_HIT_DRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC817FD, }, "CHA.TOR_INSERTS.IA_CLFLUSH": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8C7FF, }, "CHA.TOR_INSERTS.IO_HIT_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xCC43FD, }, "CHA.TOR_INSERTS.WBQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b1xxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IA_MISS_RFO_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC887FE, }, "CHA.TOR_INSERTS.IO_MISS_ITOMCACHENEAR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xCD43FE, }, "CHA.TOR_INSERTS.IA_HIT_LLCPREFRFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCCC7FD, }, "CHA.TOR_INSERTS.IA_MISS_RFO_LOCAL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC806FE, }, "CHA.TOR_INSERTS.LOC_IO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC000FF, }, "CHA.TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC88EFE, }, "CHA.TOR_INSERTS.IA_MISS_WCIL_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86F86, }, "CHA.TOR_INSERTS.HBM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxx1xxxx", }, "CHA.TOR_INSERTS.IA_WBMTOI": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCC27FF, }, "CHA.TOR_INSERTS.IA_RFO_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC887FF, }, "CHA.TOR_INSERTS.IA_DRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC817FF, }, "CHA.TOR_INSERTS.IA_MISS_UCRDF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC877DE, }, "CHA.TOR_INSERTS.IA_MISS_DRD_PREF_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC89786, }, "CHA.TOR_INSERTS.IA_MISS_DRD_OPT_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8A7FE, }, "CHA.TOR_INSERTS.IA_MISS_DRD_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8178A, }, "CHA.TOR_INSERTS.IA_MISS_CRD_LOCAL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC80EFE, }, "CHA.TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86E8A, }, "CHA.TOR_INSERTS.IA_HIT_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC807FD, }, "CHA.TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC886FE, }, "CHA.TOR_INSERTS.IA_MISS_CRD_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC88FFE, }, "CHA.TOR_INSERTS.IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC001FF, }, "CHA.TOR_INSERTS.NEARMEM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxx1xxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IA_WCIL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86FFF, }, "CHA.TOR_INSERTS.IA_MISS_WIL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC87FDE, }, "CHA.TOR_INSERTS.IO_ITOMCACHENEAR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xCD43FF, }, "CHA.TOR_INSERTS.IA_MISS_DRDPTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC837FE, }, "CHA.TOR_INSERTS.IA_MISS_DRD_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC81786, }, "CHA.TOR_INSERTS.IA_HIT_DRDPTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC837FD, }, "CHA.TOR_INSERTS.IA_MISS_WCILF_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86786, }, "CHA.TOR_INSERTS.IA_MISS_WCIL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86FFE, }, "CHA.TOR_INSERTS.IO_HIT_ITOMCACHENEAR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xCD43FD, }, "CHA.TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8668A, }, "CHA.TOR_INSERTS.MATCH_OPC": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxXXXXXXXXXXXx1xxxxxxxxx", }, "CHA.TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC89706, }, "CHA.TOR_INSERTS.REMOTE_TGT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxx1xxxxxxxx", }, "CHA.TOR_INSERTS.IA_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCC47FF, }, "CHA.TOR_INSERTS.IA_HIT_LLCPREFDATA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCCD7FD, }, "CHA.TOR_INSERTS.IA_MISS_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCC47FE, }, "CHA.TOR_INSERTS.IA_MISS_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC807FE, }, "CHA.TOR_INSERTS.IA_HIT_DRD_OPT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC827FD, }, "CHA.TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8968A, }, "CHA.TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8670A, }, "CHA.TOR_INSERTS.IA_MISS_WCILF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC867FE, }, "CHA.TOR_INSERTS.IA_MISS_LLCPREFRFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCCC7FE, }, "CHA.TOR_INSERTS.IA_HIT_RFO_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC887FD, }, "CHA.TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86706, }, "CHA.TOR_INSERTS.IA_CRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC80FFF, }, "CHA.TOR_INSERTS.IO_MISS_PCIRDCUR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC8F3FE, }, "CHA.TOR_INSERTS.IO_WBMTOI": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xCC23FF, }, "CHA.TOR_INSERTS.IA_WCILF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC867FF, }, "CHA.TOR_INSERTS.PRQ_IOSF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxx1", }, "CHA.TOR_INSERTS.IA_MISS_CRD_REMOTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC80F7E, }, "CHA.TOR_INSERTS.MMIO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxx1xxxxxx", }, "CHA.TOR_INSERTS.IA_CLFLUSHOPT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8D7FF, }, "CHA.TOR_INSERTS.LOCAL_TGT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxx1xxxxxxx", }, "CHA.TOR_INSERTS.IA_MISS_DRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC817FE, }, "CHA.TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8170A, }, "CHA.TOR_INSERTS.EVICT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86F0A, }, "CHA.TOR_INSERTS.IA_HIT_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xCC47FD, }, "CHA.TOR_INSERTS.IO_CLFLUSH": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000100", "UmaskExt": 0xC8C3FF, }, "CHA.TOR_INSERTS.IA_MISS_WCIL_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC86F8A, }, "CHA.TOR_INSERTS.IA_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC001FE, }, "CHA.TOR_INSERTS.IA_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC001FD, }, "CHA.TOR_INSERTS.RRQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bx1xxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8877E, }, "CHA.TOR_INSERTS.ALL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b11111111", "UmaskExt": 0xC001FF, }, "CHA.TOR_INSERTS.DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxx1xx", }, "CHA.TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC8970A, }, "CHA.TOR_INSERTS.IA_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC807FF, }, "CHA.TOR_INSERTS.IA_DRD_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaning it is necessary to set one of the queue bits before one can measure .HIT or .MISS.", "Umask": "b00000001", "UmaskExt": 0xC897FF, }, "CHA.TOR_OCCUPANCY": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", }, "CHA.TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. 
The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86706, }, "CHA.TOR_OCCUPANCY.IA_CRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC80FFF, }, "CHA.TOR_OCCUPANCY.IA_HIT_RFO_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC887FD, }, "CHA.TOR_OCCUPANCY.HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxx1", }, "CHA.TOR_OCCUPANCY.IA_WCILF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC867FF, }, "CHA.TOR_OCCUPANCY.IO_MISS_PCIRDCUR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC8F3FE, }, "CHA.TOR_OCCUPANCY.IO_WBMTOI": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xCC23FF, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8170A, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC817FE, }, "CHA.TOR_OCCUPANCY.LOCAL_TGT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxx1xxxxxxx", }, "CHA.TOR_OCCUPANCY.EVICT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.MMIO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxx1xxxxxx", }, "CHA.TOR_OCCUPANCY.IA_MISS_CRD_REMOTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC80F7E, }, "CHA.TOR_OCCUPANCY.IA_CLFLUSHOPT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8D7FF, }, "CHA.TOR_OCCUPANCY.IA_MISS_WCIL_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86F8A, }, "CHA.TOR_OCCUPANCY.IA_HIT_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCC47FD, }, "CHA.TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86F0A, }, "CHA.TOR_OCCUPANCY.IO_CLFLUSH": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC8C3FF, }, "CHA.TOR_OCCUPANCY.IA_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC001FE, }, "CHA.TOR_OCCUPANCY.IA_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC001FD, }, "CHA.TOR_OCCUPANCY.DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxx1xx", }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8970A, }, "CHA.TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8877E, }, "CHA.TOR_OCCUPANCY.IA_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC807FF, }, "CHA.TOR_OCCUPANCY.IA_DRD_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC897FF, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8178A, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8A7FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_UCRDF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC877DE, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC89786, }, "CHA.TOR_OCCUPANCY.IA_DRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC817FF, }, "CHA.TOR_OCCUPANCY.IA_RFO_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC887FF, }, "CHA.TOR_OCCUPANCY.IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC001FF, }, "CHA.TOR_OCCUPANCY.IA_MISS_CRD_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC88FFE, }, "CHA.TOR_OCCUPANCY.IA_MISS_CRD_LOCAL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC80EFE, }, "CHA.TOR_OCCUPANCY.IA_HIT_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC807FD, }, "CHA.TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC886FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86E8A, }, "CHA.TOR_OCCUPANCY.NEARMEM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxx1xxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.IA_MISS_WIL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC87FDE, }, "CHA.TOR_OCCUPANCY.IA_WCIL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86FFF, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC81786, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRDPTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC837FE, }, "CHA.TOR_OCCUPANCY.IO_ITOMCACHENEAR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xCD43FF, }, "CHA.TOR_OCCUPANCY.IA_MISS_WCIL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86FFE, }, "CHA.TOR_OCCUPANCY.PRQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.IA_MISS_WCILF_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86786, }, "CHA.TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8668A, }, "CHA.TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xCD43FD, }, "CHA.TOR_OCCUPANCY.IA_HIT_DRDPTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC837FD, }, "CHA.TOR_OCCUPANCY.IA_HIT_LLCPREFDATA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCCD7FD, }, "CHA.TOR_OCCUPANCY.IA_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCC47FF, }, "CHA.TOR_OCCUPANCY.REMOTE_TGT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxx1xxxxxxxx", }, "CHA.TOR_OCCUPANCY.MATCH_OPC": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxXXXXXXXXXXXx1xxxxxxxxx", }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC89706, }, "CHA.TOR_OCCUPANCY.IA_HIT_DRD_OPT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC827FD, }, "CHA.TOR_OCCUPANCY.IA_MISS_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC807FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8968A, }, "CHA.TOR_OCCUPANCY.IA_MISS_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCC47FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_LLCPREFRFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCCC7FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8670A, }, "CHA.TOR_OCCUPANCY.IA_MISS_WCILF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC867FE, }, "CHA.TOR_OCCUPANCY.IA_LLCPREFCODE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCCCFFF, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC89686, }, "CHA.TOR_OCCUPANCY.IPQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxx1xxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.IRQ_NON_IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxx1xxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_REMOTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8177E, }, "CHA.TOR_OCCUPANCY.IA_DRD_OPT_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8A7FF, }, "CHA.TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8A7FD, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_OPT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC827FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_CRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC80FFE, }, "CHA.TOR_OCCUPANCY.IO_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC803FF, }, "CHA.TOR_OCCUPANCY.IO_PCIRDCUR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC8F3FF, }, "CHA.TOR_OCCUPANCY.IO_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC001FD, }, "CHA.TOR_OCCUPANCY.LOC_ALL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000101", "UmaskExt": 0xC000FF, }, "CHA.TOR_OCCUPANCY.IO_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC001FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8168A, }, "CHA.TOR_OCCUPANCY.PREMORPH_OPC": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxXXXXXXXXXXX1xxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxx1xxx", }, "CHA.TOR_OCCUPANCY.IA_DRDPTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC837FF, }, "CHA.TOR_OCCUPANCY.IRQ_IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC81706, }, "CHA.TOR_OCCUPANCY.IA_MISS_LLCPREFCODE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCCCFFE, }, "CHA.TOR_OCCUPANCY.PRQ_NON_IOSF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxx1xxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.NOT_NEARMEM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxx1xxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8978A, }, "CHA.TOR_OCCUPANCY.IA_CLFLUSH": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8C7FF, }, "CHA.TOR_OCCUPANCY.IO_HIT_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xCC43FD, }, "CHA.TOR_OCCUPANCY.IA_HIT_CRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC80FFD, }, "CHA.TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86F06, }, "CHA.TOR_OCCUPANCY.IA_HIT_DRD": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC817FD, }, "CHA.TOR_OCCUPANCY.IA_MISS_RFO_LOCAL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC806FE, }, "CHA.TOR_OCCUPANCY.IA_HIT_LLCPREFRFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCCC7FD, }, "CHA.TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC88EFE, }, "CHA.TOR_OCCUPANCY.LOC_IO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC000FF, }, "CHA.TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xCD43FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_RFO_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC887FE, }, "CHA.TOR_OCCUPANCY.IA_WBMTOI": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCC27FF, }, "CHA.TOR_OCCUPANCY.HBM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxx1xxxx", }, "CHA.TOR_OCCUPANCY.IA_MISS_WCIL_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86F86, }, "CHA.TOR_OCCUPANCY.IO_HIT_PCIRDCUR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC8F3FD, }, "CHA.TOR_OCCUPANCY.LOC_IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC000FF, }, "CHA.TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86E86, }, "CHA.TOR_OCCUPANCY.IA_DRD_OPT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC827FF, }, "CHA.TOR_OCCUPANCY.IO_MISS_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xCC43FE, }, "CHA.TOR_OCCUPANCY.ISOC": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "b1xxxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.MMCFG": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxx1xxxxx", }, "CHA.TOR_OCCUPANCY.IO_MISS_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC803FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC88F7E, }, "CHA.TOR_OCCUPANCY.IA_LLCPREFRFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCCC7FF, }, "CHA.TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC86686, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_LOCAL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC816FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC897FE, }, "CHA.TOR_OCCUPANCY.IA_MISS_WCILF_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8678A, }, "CHA.TOR_OCCUPANCY.IO_ITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xCC43FF, }, "CHA.TOR_OCCUPANCY.IA_HIT_DRD_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC897FD, }, "CHA.TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC81686, }, "CHA.TOR_OCCUPANCY.IA_HIT_CRD_PREF": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC88FFD, }, "CHA.TOR_OCCUPANCY.NONCOH": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bx1xxxxxxxxxxxxxxxxxxxxxxxx", }, "CHA.TOR_OCCUPANCY.IA_MISS_RFO_REMOTE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xC8077E, }, "CHA.TOR_OCCUPANCY.IA_ITOMCACHENEAR": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCD47FF, }, "CHA.TOR_OCCUPANCY.MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxxxxxxxxxxxxxxxxxxx1x", }, "CHA.TOR_OCCUPANCY.IA_HIT_LLCPREFCODE": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCCCFFD, }, "CHA.TOR_OCCUPANCY.IA_SPECITOM": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCC57FF, }, "CHA.TOR_OCCUPANCY.IO_HIT_RFO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC803FD, }, "CHA.TOR_OCCUPANCY.IA_MISS_LLCPREFDATA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCCD7FE, }, "CHA.TOR_OCCUPANCY.IA_LLCPREFDATA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000001", "UmaskExt": 0xCCD7FF, }, "CHA.TOR_OCCUPANCY.IO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00000100", "UmaskExt": 0xC001FF, }, "CHA.TxR_HORZ_ADS_USED": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", }, "CHA.TxR_HORZ_ADS_USED.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00010000", }, "CHA.TxR_HORZ_ADS_USED.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00000001", }, "CHA.TxR_HORZ_ADS_USED.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS 
Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b01000100", }, "CHA.TxR_HORZ_ADS_USED.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00010001", }, "CHA.TxR_HORZ_ADS_USED.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00000100", }, "CHA.TxR_HORZ_ADS_USED.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b01000000", }, "CHA.TxR_HORZ_BYPASS": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", }, "CHA.TxR_HORZ_BYPASS.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00010000", }, "CHA.TxR_HORZ_BYPASS.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b01000100", }, "CHA.TxR_HORZ_BYPASS.AK": { "Box": "CHA", "Category": "CHA CMS Horizontal 
EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000010", }, "CHA.TxR_HORZ_BYPASS.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00010001", }, "CHA.TxR_HORZ_BYPASS.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b01000000", }, "CHA.TxR_HORZ_BYPASS.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000001", }, "CHA.TxR_HORZ_BYPASS.AKC_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b10000000", }, "CHA.TxR_HORZ_BYPASS.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000100", }, "CHA.TxR_HORZ_BYPASS.IV": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00001000", }, "CHA.TxR_HORZ_CYCLES_FULL": { "Box": "CHA", "Category": "CHA CMS 
Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", }, "CHA.TxR_HORZ_CYCLES_FULL.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000001", }, "CHA.TxR_HORZ_CYCLES_FULL.AKC_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b10000000", }, "CHA.TxR_HORZ_CYCLES_FULL.IV": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00001000", }, "CHA.TxR_HORZ_CYCLES_FULL.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000100", }, "CHA.TxR_HORZ_CYCLES_FULL.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b01000100", }, "CHA.TxR_HORZ_CYCLES_FULL.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00010000", }, "CHA.TxR_HORZ_CYCLES_FULL.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b01000000", }, "CHA.TxR_HORZ_CYCLES_FULL.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00010001", }, "CHA.TxR_HORZ_CYCLES_FULL.AK": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000010", }, "CHA.TxR_HORZ_CYCLES_NE": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", }, "CHA.TxR_HORZ_CYCLES_NE.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00010001", }, "CHA.TxR_HORZ_CYCLES_NE.AK": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000010", }, "CHA.TxR_HORZ_CYCLES_NE.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b01000000", }, "CHA.TxR_HORZ_CYCLES_NE.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00010000", }, "CHA.TxR_HORZ_CYCLES_NE.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b01000100", }, "CHA.TxR_HORZ_CYCLES_NE.AKC_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b10000000", }, "CHA.TxR_HORZ_CYCLES_NE.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000100", }, "CHA.TxR_HORZ_CYCLES_NE.IV": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00001000", }, "CHA.TxR_HORZ_CYCLES_NE.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000001", }, "CHA.TxR_HORZ_INSERTS": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", }, "CHA.TxR_HORZ_INSERTS.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00010001", }, "CHA.TxR_HORZ_INSERTS.AK": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000010", }, "CHA.TxR_HORZ_INSERTS.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b01000000", }, "CHA.TxR_HORZ_INSERTS.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00010000", }, "CHA.TxR_HORZ_INSERTS.BL_ALL": { 
"Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b01000100", }, "CHA.TxR_HORZ_INSERTS.IV": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00001000", }, "CHA.TxR_HORZ_INSERTS.AKC_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b10000000", }, "CHA.TxR_HORZ_INSERTS.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000100", }, "CHA.TxR_HORZ_INSERTS.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000001", }, "CHA.TxR_HORZ_NACK": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", 
}, "CHA.TxR_HORZ_NACK.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b01000000", }, "CHA.TxR_HORZ_NACK.AK": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000010", }, "CHA.TxR_HORZ_NACK.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00010001", }, "CHA.TxR_HORZ_NACK.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b01000100", }, "CHA.TxR_HORZ_NACK.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00010000", }, "CHA.TxR_HORZ_NACK.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000100", }, "CHA.TxR_HORZ_NACK.AKC_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b10000000", }, "CHA.TxR_HORZ_NACK.IV": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", 
"Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00001000", }, "CHA.TxR_HORZ_NACK.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000001", }, "CHA.TxR_HORZ_OCCUPANCY": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", }, "CHA.TxR_HORZ_OCCUPANCY.AK": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000010", }, "CHA.TxR_HORZ_OCCUPANCY.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00010001", }, "CHA.TxR_HORZ_OCCUPANCY.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b01000000", }, "CHA.TxR_HORZ_OCCUPANCY.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on 
the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00010000", }, "CHA.TxR_HORZ_OCCUPANCY.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b01000100", }, "CHA.TxR_HORZ_OCCUPANCY.IV": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00001000", }, "CHA.TxR_HORZ_OCCUPANCY.AKC_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b10000000", }, "CHA.TxR_HORZ_OCCUPANCY.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000100", }, "CHA.TxR_HORZ_OCCUPANCY.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000001", }, "CHA.TxR_HORZ_STARVED": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts 
injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", }, "CHA.TxR_HORZ_STARVED.BL_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000100", }, "CHA.TxR_HORZ_STARVED.AK": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000010", }, "CHA.TxR_HORZ_STARVED.AD_ALL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000001", }, "CHA.TxR_HORZ_STARVED.AD_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000001", }, "CHA.TxR_HORZ_STARVED.IV": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00001000", }, "CHA.TxR_HORZ_STARVED.AKC_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b10000000", }, "CHA.TxR_HORZ_STARVED.BL_UNCRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000100", }, "CHA.TxR_VERT_ADS_USED": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", }, "CHA.TxR_VERT_ADS_USED.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_ADS_USED.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_ADS_USED.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical 
Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_ADS_USED.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_BYPASS": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", }, "CHA.TxR_VERT_BYPASS.IV_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_BYPASS.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_BYPASS.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_BYPASS.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_BYPASS.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": 
"CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_BYPASS.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_BYPASS.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_BYPASS_1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", }, "CHA.TxR_VERT_BYPASS_1.AKC_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_BYPASS_1.AKC_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_CYCLES_FULL0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", }, "CHA.TxR_VERT_CYCLES_FULL0.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_CYCLES_FULL0.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_CYCLES_FULL0.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_CYCLES_FULL0.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_CYCLES_FULL0.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_CYCLES_FULL0.IV_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_CYCLES_FULL0.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_CYCLES_FULL1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", }, "CHA.TxR_VERT_CYCLES_FULL1.AKC_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_CYCLES_FULL1.AKC_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_CYCLES_NE0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", }, "CHA.TxR_VERT_CYCLES_NE0.IV_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_CYCLES_NE0.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_CYCLES_NE0.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_CYCLES_NE0.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_CYCLES_NE0.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_CYCLES_NE0.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_CYCLES_NE0.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_CYCLES_NE1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", }, "CHA.TxR_VERT_CYCLES_NE1.AKC_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_CYCLES_NE1.AKC_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_INSERTS0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", }, "CHA.TxR_VERT_INSERTS0.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_INSERTS0.IV_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_INSERTS0.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_INSERTS0.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_INSERTS0.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_INSERTS0.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_INSERTS0.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_INSERTS1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", }, "CHA.TxR_VERT_INSERTS1.AKC_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_INSERTS1.AKC_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_NACK0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", }, "CHA.TxR_VERT_NACK0.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_NACK0.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_NACK0.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_NACK0.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_NACK0.IV_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_NACK0.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_NACK0.AK_AG1": { "Box": "CHA", "Category": 
"CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_NACK1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", }, "CHA.TxR_VERT_NACK1.AKC_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_NACK1.AKC_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_OCCUPANCY0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", }, "CHA.TxR_VERT_OCCUPANCY0.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_OCCUPANCY0.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_OCCUPANCY0.AD_AG1": { "Box": "CHA", 
"Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_OCCUPANCY0.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_OCCUPANCY0.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_OCCUPANCY0.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_OCCUPANCY0.IV_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_OCCUPANCY1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", }, 
"CHA.TxR_VERT_OCCUPANCY1.AKC_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_OCCUPANCY1.AKC_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_STARVED0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", }, "CHA.TxR_VERT_STARVED0.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_STARVED0.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_STARVED0.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_STARVED0.IV_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_STARVED0.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_STARVED0.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_STARVED0.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_STARVED1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", }, "CHA.TxR_VERT_STARVED1.AKC_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_STARVED1.TGC": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_STARVED1.AKC_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.VERT_RING_AD_IN_USE": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", }, "CHA.VERT_RING_AD_IN_USE.UP_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.VERT_RING_AD_IN_USE.DN_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.VERT_RING_AD_IN_USE.DN_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.VERT_RING_AD_IN_USE.UP_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.VERT_RING_AKC_IN_USE": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", }, "CHA.VERT_RING_AKC_IN_USE.UP_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.VERT_RING_AKC_IN_USE.DN_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.VERT_RING_AKC_IN_USE.DN_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.VERT_RING_AKC_IN_USE.UP_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.VERT_RING_AK_IN_USE": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", }, "CHA.VERT_RING_AK_IN_USE.DN_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.VERT_RING_AK_IN_USE.UP_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.VERT_RING_AK_IN_USE.UP_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.VERT_RING_AK_IN_USE.DN_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.VERT_RING_BL_IN_USE": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", }, "CHA.VERT_RING_BL_IN_USE.DN_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.VERT_RING_BL_IN_USE.UP_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.VERT_RING_BL_IN_USE.UP_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.VERT_RING_BL_IN_USE.DN_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.VERT_RING_IV_IN_USE": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", }, "CHA.VERT_RING_IV_IN_USE.DN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.VERT_RING_IV_IN_USE.UP": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.VERT_RING_TGC_IN_USE": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", }, "CHA.VERT_RING_TGC_IN_USE.UP_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.VERT_RING_TGC_IN_USE.DN_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.VERT_RING_TGC_IN_USE.DN_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.VERT_RING_TGC_IN_USE.UP_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.WB_PUSH_MTOI": { "Box": "CHA", "Category": "CHA HA WBPUSHMTOI Events", "Defn": "Counts the number of times when the CHA was received WbPushMtoI", "Desc": "WbPushMtoI", "EvSel": 86, "ExtSel": "", }, "CHA.WB_PUSH_MTOI.MEM": { "Box": "CHA", "Category": "CHA HA WBPUSHMTOI Events", "Defn": "Counts the number of times when the CHA was received WbPushMtoI", "Desc": "WbPushMtoI", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.WB_PUSH_MTOI.LLC": { "Box": "CHA", "Category": "CHA HA WBPUSHMTOI Events", "Defn": "Counts the number of times when the CHA was received WbPushMtoI", "Desc": "WbPushMtoI", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.WRITE_NO_CREDITS": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", }, "CHA.WRITE_NO_CREDITS.MC2": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxx", }, "CHA.WRITE_NO_CREDITS.MC5": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. 
In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxx1xxxxx", "UmaskExt": "bxxxxxxxx", }, "CHA.WRITE_NO_CREDITS.MC9": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxx1x", }, "CHA.WRITE_NO_CREDITS.MC7": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "b1xxxxxxx", "UmaskExt": "bxxxxxxxx", }, "CHA.WRITE_NO_CREDITS.MC8": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxx1", }, "CHA.WRITE_NO_CREDITS.MC10": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. 
In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxx1xx", }, "CHA.WRITE_NO_CREDITS.MC13": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxx1xxxxx", }, "CHA.WRITE_NO_CREDITS.MC4": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxx1xxxx", "UmaskExt": "bxxxxxxxx", }, "CHA.WRITE_NO_CREDITS.MC3": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxx1xxx", "UmaskExt": "bxxxxxxxx", }, "CHA.WRITE_NO_CREDITS.MC6": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. 
In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bx1xxxxxx", "UmaskExt": "bxxxxxxxx", }, "CHA.WRITE_NO_CREDITS.MC12": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxx1xxxx", }, "CHA.WRITE_NO_CREDITS.MC11": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxx1xxx", }, "CHA.WRITE_NO_CREDITS.MC1": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxx", }, "CHA.WRITE_NO_CREDITS.MC0": { "Box": "CHA", "Category": "CHA MC Credit and Traffic Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. 
In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxx", }, "CHA.XPT_PREF": { "Box": "CHA", "Category": "CHA XPT Events", "Desc": "XPT Prefetches", "EvSel": 111, "ExtSel": "", }, "CHA.XPT_PREF.DROP1_NOCRD": { "Box": "CHA", "Category": "CHA XPT Events", "Desc": "XPT Prefetches", "EvSel": 111, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.XPT_PREF.DROP0_CONFLICT": { "Box": "CHA", "Category": "CHA XPT Events", "Desc": "XPT Prefetches", "EvSel": 111, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.XPT_PREF.SENT0": { "Box": "CHA", "Category": "CHA XPT Events", "Desc": "XPT Prefetches", "EvSel": 111, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.XPT_PREF.DROP1_CONFLICT": { "Box": "CHA", "Category": "CHA XPT Events", "Desc": "XPT Prefetches", "EvSel": 111, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.XPT_PREF.DROP0_NOCRD": { "Box": "CHA", "Category": "CHA XPT Events", "Desc": "XPT Prefetches", "EvSel": 111, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.XPT_PREF.SENT1": { "Box": "CHA", "Category": "CHA XPT Events", "Desc": "XPT Prefetches", "EvSel": 111, "ExtSel": "", "Umask": "bxxx1xxxx", }, # IRP: "IRP.CACHE_TOTAL_OCCUPANCY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 15, "ExtSel": "", "MaxIncCyc": 511, }, "IRP.CACHE_TOTAL_OCCUPANCY.MEM": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. 
This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 15, "ExtSel": "", "MaxIncCyc": 511, "Umask": "b00000100", }, "IRP.CACHE_TOTAL_OCCUPANCY.ANY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 15, "ExtSel": "", "MaxIncCyc": 511, "Umask": "b00000001", }, "IRP.CACHE_TOTAL_OCCUPANCY.IV_Q": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 15, "ExtSel": "", "MaxIncCyc": 511, "Umask": "b00000010", }, "IRP.CLOCKTICKS": { "Box": "IRP", "Category": "IRP CLOCK Events", "Counters": "0-1", "Desc": "Clockticks of the IO coherency tracker (IRP)", "EvSel": 1, "ExtSel": "", }, "IRP.COHERENT_OPS": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "MaxIncCyc": 2, }, "IRP.COHERENT_OPS.WBMTOI": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bx1xxxxxx", }, "IRP.COHERENT_OPS.PCIRDCUR": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "IRP.COHERENT_OPS.CLFLUSH": { "Box": "IRP", "Category": "IRP 
Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b1xxxxxxx", }, "IRP.COHERENT_OPS.RFO": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "IRP.COHERENT_OPS.PCITOM": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "IRP.FAF_FULL": { "Box": "IRP", "Category": "IRP FAF Events", "Counters": "0-1", "Desc": "FAF RF full", "EvSel": 23, "ExtSel": "", }, "IRP.FAF_INSERTS": { "Box": "IRP", "Category": "IRP FAF Events", "Counters": "0-1", "Defn": "Read transactions", "Desc": "FAF - request insert from TC.", "EvSel": 24, "ExtSel": "", "Notes": "Read prefetch transactions no longer go through M2IOSF", }, "IRP.FAF_OCCUPANCY": { "Box": "IRP", "Category": "IRP FAF Events", "Counters": "0-1", "Desc": "FAF occupancy", "EvSel": 25, "ExtSel": "", "MaxIncCyc": 31, }, "IRP.FAF_TRANSACTIONS": { "Box": "IRP", "Category": "IRP FAF Events", "Counters": "0-1", "Desc": "FAF allocation -- sent to ADQ", "EvSel": 22, "ExtSel": "", }, "IRP.IRP_ALL": { "Box": "IRP", "Category": "IRP IRP Buffer Events", "Counters": "0-1", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 3, }, "IRP.IRP_ALL.INBOUND_INSERTS": { "Box": "IRP", "Category": "IRP IRP Buffer Events", "Counters": "0-1", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 3, "Umask": "b00000001", }, "IRP.IRP_ALL.OUTBOUND_INSERTS": { "Box": "IRP", "Category": "IRP IRP Buffer Events", "Counters": "0-1", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 3, "Umask": "b00000010", }, "IRP.IRP_ALL.EVICTS": { "Box": "IRP", "Category": "IRP IRP Buffer 
Events", "Counters": "0-1", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 3, "Umask": "b00000100", }, "IRP.MISC0": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Counts Timeouts - Set 0", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, }, "IRP.MISC0.FAST_REJ": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Counts Timeouts - Set 0", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b0000001x", }, "IRP.MISC0.SLOWPATH_FWPF_NO_PRF": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Counts Timeouts - Set 0", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b1xx00000", }, "IRP.MISC0.FAST_XFER": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Counts Timeouts - Set 0", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxx100000", }, "IRP.MISC0.2ND_WR_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Counts Timeouts - Set 0", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bx00x1x00", }, "IRP.MISC0.2ND_RD_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Counts Timeouts - Set 0", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bx00xx100", }, "IRP.MISC0.PF_ACK_HINT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Counts Timeouts - Set 0", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bx1x00000", }, "IRP.MISC0.FAST_REQ": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Counts Timeouts - Set 0", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b000000x1", }, "IRP.MISC0.2ND_ATOMIC_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Counts Timeouts - Set 0", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bx001xx00", }, "IRP.MISC1": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 2, }, "IRP.MISC1.SLOW_M": { 
"Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b000x1xxx", }, "IRP.MISC1.SLOW_I": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b000xxxx1", }, "IRP.MISC1.SEC_RCVD_VLD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bx1xx0000", }, "IRP.MISC1.SLOW_E": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b000xx1xx", }, "IRP.MISC1.SEC_RCVD_INVLD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxx1x0000", }, "IRP.MISC1.SLOW_S": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b000xxx1x", }, "IRP.MISC1.LOST_FWD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b0001xxxx", }, "IRP.P2P_INSERTS": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Defn": "P2P requests from the ITC", "Desc": "P2P Requests", "EvSel": 20, "ExtSel": "", }, "IRP.P2P_OCCUPANCY": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Defn": "P2P B & S Queue Occupancy", "Desc": "P2P Occupancy", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 63, }, "IRP.P2P_TRANSACTIONS": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Notes": "Top 4 bits can be Ored. Bottom 4 bits allow specific filtering. .REM = 0 and .LOC = 0 == ALL. .REM = 1 and .LOC = 1 == ALL. .REM = 1 and .LOC = 0 then you just measure remote traffic. 
.REM = 0 and .LOC = 1 then you just measure local traffic of whatever type of traffic chosen in first 4 bits. Unsure how to define P2P target. Some HW ID?", }, "IRP.P2P_TRANSACTIONS.MSG": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Notes": "Top 4 bits can be Ored. Bottom 4 bits allow specific filtering. .REM = 0 and .LOC = 0 == ALL. .REM = 1 and .LOC = 1 == ALL. .REM = 1 and .LOC = 0 then you just measure remote traffic. .REM = 0 and .LOC = 1 then you just measure local traffic of whatever type of traffic chosen in first 4 bits. Unsure how to define P2P target. Some HW ID?", "Umask": "bxxxxx1xx", }, "IRP.P2P_TRANSACTIONS.CMPL": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Notes": "Top 4 bits can be Ored. Bottom 4 bits allow specific filtering. .REM = 0 and .LOC = 0 == ALL. .REM = 1 and .LOC = 1 == ALL. .REM = 1 and .LOC = 0 then you just measure remote traffic. .REM = 0 and .LOC = 1 then you just measure local traffic of whatever type of traffic chosen in first 4 bits. Unsure how to define P2P target. Some HW ID?", "Umask": "bxxxx1xxx", }, "IRP.P2P_TRANSACTIONS.RD": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Notes": "Top 4 bits can be Ored. Bottom 4 bits allow specific filtering. .REM = 0 and .LOC = 0 == ALL. .REM = 1 and .LOC = 1 == ALL. .REM = 1 and .LOC = 0 then you just measure remote traffic. .REM = 0 and .LOC = 1 then you just measure local traffic of whatever type of traffic chosen in first 4 bits. Unsure how to define P2P target. Some HW ID?", "Umask": "bxxxxxxx1", }, "IRP.P2P_TRANSACTIONS.LOC_AND_TGT_MATCH": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Notes": "Top 4 bits can be Ored. Bottom 4 bits allow specific filtering. .REM = 0 and .LOC = 0 == ALL. 
.REM = 1 and .LOC = 1 == ALL. .REM = 1 and .LOC = 0 then you just measure remote traffic. .REM = 0 and .LOC = 1 then you just measure local traffic of whatever type of traffic chosen in first 4 bits. Unsure how to define P2P target. Some HW ID?", "Umask": "b1xxxxxxx", }, "IRP.P2P_TRANSACTIONS.REM": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Notes": "Top 4 bits can be Ored. Bottom 4 bits allow specific filtering. .REM = 0 and .LOC = 0 == ALL. .REM = 1 and .LOC = 1 == ALL. .REM = 1 and .LOC = 0 then you just measure remote traffic. .REM = 0 and .LOC = 1 then you just measure local traffic of whatever type of traffic chosen in first 4 bits. Unsure how to define P2P target. Some HW ID?", "Umask": "bxxx1xxxx", }, "IRP.P2P_TRANSACTIONS.WR": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Notes": "Top 4 bits can be Ored. Bottom 4 bits allow specific filtering. .REM = 0 and .LOC = 0 == ALL. .REM = 1 and .LOC = 1 == ALL. .REM = 1 and .LOC = 0 then you just measure remote traffic. .REM = 0 and .LOC = 1 then you just measure local traffic of whatever type of traffic chosen in first 4 bits. Unsure how to define P2P target. Some HW ID?", "Umask": "bxxxxxx1x", }, "IRP.P2P_TRANSACTIONS.REM_AND_TGT_MATCH": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Notes": "Top 4 bits can be Ored. Bottom 4 bits allow specific filtering. .REM = 0 and .LOC = 0 == ALL. .REM = 1 and .LOC = 1 == ALL. .REM = 1 and .LOC = 0 then you just measure remote traffic. .REM = 0 and .LOC = 1 then you just measure local traffic of whatever type of traffic chosen in first 4 bits. Unsure how to define P2P target. 
Some HW ID?", "Umask": "bxx1xxxxx", }, "IRP.P2P_TRANSACTIONS.LOC": { "Box": "IRP", "Category": "IRP P2P Events", "Counters": "0-1", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Notes": "Top 4 bits can be Ored. Bottom 4 bits allow specific filtering. .REM = 0 and .LOC = 0 == ALL. .REM = 1 and .LOC = 1 == ALL. .REM = 1 and .LOC = 0 then you just measure remote traffic. .REM = 0 and .LOC = 1 then you just measure local traffic of whatever type of traffic chosen in first 4 bits. Unsure how to define P2P target. Some HW ID?", "Umask": "bx1xxxxxx", }, "IRP.SNOOP_RESP": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", }, "IRP.SNOOP_RESP.HIT_M": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxx1xxx", }, "IRP.SNOOP_RESP.ALL_HIT": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. 
Unsure which combinations are possible.", "Umask": "b01111110", }, "IRP.SNOOP_RESP.ALL_HIT_ES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "b01110100", }, "IRP.SNOOP_RESP.ALL_HIT_M": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "b01111000", }, "IRP.SNOOP_RESP.ALL_HIT_I": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "b01110010", }, "IRP.SNOOP_RESP.MISS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. 
Unsure which combinations are possible.", "Umask": "bxxxxxxx1", }, "IRP.SNOOP_RESP.SNPINV": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bx1xxxxxx", }, "IRP.SNOOP_RESP.SNPDATA": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxx1xxxxx", }, "IRP.SNOOP_RESP.HIT_I": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxxxx1x", }, "IRP.SNOOP_RESP.ALL_MISS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. 
Unsure which combinations are possible.", "Umask": "b01110001", }, "IRP.SNOOP_RESP.HIT_ES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxxx1xx", }, "IRP.SNOOP_RESP.SNPCODE": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 2, "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxx1xxxx", }, "IRP.TRANSACTIONS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", }, "IRP.TRANSACTIONS.WR_PREF": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. 
If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxx1xxx", }, "IRP.TRANSACTIONS.ATOMIC": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxx1xxxx", }, "IRP.TRANSACTIONS.WRITES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxxx1x", }, "IRP.TRANSACTIONS.OTHER": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. 
If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxx1xxxxx", }, "IRP.TRANSACTIONS.ORDERINGQ": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bx1xxxxxx", }, "IRP.TxC_AK_INSERTS": { "Box": "IRP", "Category": "IRP AK Egress Events", "Counters": "0-1", "Desc": "AK Egress Allocations", "EvSel": 11, "ExtSel": "", }, "IRP.TxC_BL_DRS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL DRS Egress Cycles Full", "EvSel": 5, "ExtSel": "", }, "IRP.TxC_BL_DRS_INSERTS": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL DRS Egress Inserts", "EvSel": 2, "ExtSel": "", }, "IRP.TxC_BL_DRS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL DRS Egress Occupancy", "EvSel": 8, "ExtSel": "", "MaxIncCyc": 63, }, "IRP.TxC_BL_NCB_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCB Egress Cycles Full", "EvSel": 6, "ExtSel": "", }, "IRP.TxC_BL_NCB_INSERTS": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCB Egress Inserts", "EvSel": 3, "ExtSel": "", }, "IRP.TxC_BL_NCB_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCB 
Egress Occupancy", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 31, }, "IRP.TxC_BL_NCS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCS Egress Cycles Full", "EvSel": 7, "ExtSel": "", }, "IRP.TxC_BL_NCS_INSERTS": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCS Egress Inserts", "EvSel": 4, "ExtSel": "", }, "IRP.TxC_BL_NCS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCS Egress Occupancy", "EvSel": 10, "ExtSel": "", "MaxIncCyc": 15, }, "IRP.TxR2_AD01_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue a request to the M2PCIe because there are no Egress Credits available on AD0, A1 or AD0&AD1 both. Stalls on both AD0 and AD1 will count as 2", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 3, }, "IRP.TxR2_AD0_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD0 Egress Credits available.", "Desc": "No AD0 Egress Credits Stalls", "EvSel": 26, "ExtSel": "", }, "IRP.TxR2_AD1_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD1 Egress Credits available.", "Desc": "No AD1 Egress Credits Stalls", "EvSel": 27, "ExtSel": "", }, "IRP.TxR2_BL_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.", "Desc": "No BL Egress Credit Stalls", "EvSel": 29, "ExtSel": "", }, "IRP.TxS_DATA_INSERTS_NCB": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": 
"0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 13, "ExtSel": "", }, "IRP.TxS_DATA_INSERTS_NCS": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 14, "ExtSel": "", }, "IRP.TxS_REQUEST_OCCUPANCY": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.", "Desc": "Outbound Request Queue Occupancy", "EvSel": 12, "ExtSel": "", }, # M2M: "M2M.AG0_AD_CRD_ACQUIRED0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "M2M.AG0_AD_CRD_ACQUIRED0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "M2M.AG0_AD_CRD_ACQUIRED0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "M2M.AG0_AD_CRD_ACQUIRED0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS 
Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "M2M.AG0_AD_CRD_ACQUIRED0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "M2M.AG0_AD_CRD_ACQUIRED0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "M2M.AG0_AD_CRD_ACQUIRED0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "M2M.AG0_AD_CRD_ACQUIRED0.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "b1xxxxxxx", }, "M2M.AG0_AD_CRD_ACQUIRED0.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", 
"Umask": "bx1xxxxxx", }, "M2M.AG0_AD_CRD_ACQUIRED1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", }, "M2M.AG0_AD_CRD_ACQUIRED1.TGR10": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2M.AG0_AD_CRD_ACQUIRED1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2M.AG0_AD_CRD_ACQUIRED1.TGR8": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. 
Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2M.AG0_AD_CRD_OCCUPANCY0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "M2M.AG0_AD_CRD_OCCUPANCY0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00010000", }, "M2M.AG0_AD_CRD_OCCUPANCY0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00100000", }, "M2M.AG0_AD_CRD_OCCUPANCY0.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b01000000", }, "M2M.AG0_AD_CRD_OCCUPANCY0.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b10000000", }, "M2M.AG0_AD_CRD_OCCUPANCY0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", 
"Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00001000", }, "M2M.AG0_AD_CRD_OCCUPANCY0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000001", }, "M2M.AG0_AD_CRD_OCCUPANCY0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000010", }, "M2M.AG0_AD_CRD_OCCUPANCY0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000100", }, "M2M.AG0_AD_CRD_OCCUPANCY1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", }, "M2M.AG0_AD_CRD_OCCUPANCY1.TGR10": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", "Umask": "b00000100", }, "M2M.AG0_AD_CRD_OCCUPANCY1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000010", }, "M2M.AG0_AD_CRD_OCCUPANCY1.TGR8": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000001", }, "M2M.AG0_BL_CRD_ACQUIRED0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", }, "M2M.AG0_BL_CRD_ACQUIRED0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.AG0_BL_CRD_ACQUIRED0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.AG0_BL_CRD_ACQUIRED0.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.AG0_BL_CRD_ACQUIRED0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress 
Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.AG0_BL_CRD_ACQUIRED0.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2M.AG0_BL_CRD_ACQUIRED0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.AG0_BL_CRD_ACQUIRED0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.AG0_BL_CRD_ACQUIRED0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.AG0_BL_CRD_ACQUIRED1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2M.AG0_BL_CRD_ACQUIRED1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2M.AG0_BL_CRD_ACQUIRED1.TGR10": { "Box": "M2M", "Category": "M2M CMS 
Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2M.AG0_BL_CRD_ACQUIRED1.TGR8": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2M.AG0_BL_CRD_OCCUPANCY0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", }, "M2M.AG0_BL_CRD_OCCUPANCY0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000100", }, "M2M.AG0_BL_CRD_OCCUPANCY0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000010", }, "M2M.AG0_BL_CRD_OCCUPANCY0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000001", }, "M2M.AG0_BL_CRD_OCCUPANCY0.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b10000000", }, "M2M.AG0_BL_CRD_OCCUPANCY0.TGR3": { "Box": "M2M", "Category": "M2M CMS 
Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00001000", }, "M2M.AG0_BL_CRD_OCCUPANCY0.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b01000000", }, "M2M.AG0_BL_CRD_OCCUPANCY0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00100000", }, "M2M.AG0_BL_CRD_OCCUPANCY0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00010000", }, "M2M.AG0_BL_CRD_OCCUPANCY1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2M.AG0_BL_CRD_OCCUPANCY1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000010", }, "M2M.AG0_BL_CRD_OCCUPANCY1.TGR10": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000100", }, "M2M.AG0_BL_CRD_OCCUPANCY1.TGR8": 
{ "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000001", }, "M2M.AG1_AD_CRD_ACQUIRED0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "M2M.AG1_AD_CRD_ACQUIRED0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "M2M.AG1_AD_CRD_ACQUIRED0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "M2M.AG1_AD_CRD_ACQUIRED0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "M2M.AG1_AD_CRD_ACQUIRED0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR 
of all selected", "Umask": "bxxxx1xxx", }, "M2M.AG1_AD_CRD_ACQUIRED0.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "b1xxxxxxx", }, "M2M.AG1_AD_CRD_ACQUIRED0.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bx1xxxxxx", }, "M2M.AG1_AD_CRD_ACQUIRED0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "M2M.AG1_AD_CRD_ACQUIRED0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "M2M.AG1_AD_CRD_ACQUIRED1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. 
Extension not used by ICX.", }, "M2M.AG1_AD_CRD_ACQUIRED1.TGR10": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2M.AG1_AD_CRD_ACQUIRED1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2M.AG1_AD_CRD_ACQUIRED1.TGR8": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. 
Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2M.AG1_AD_CRD_OCCUPANCY0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "M2M.AG1_AD_CRD_OCCUPANCY0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000100", }, "M2M.AG1_AD_CRD_OCCUPANCY0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000001", }, "M2M.AG1_AD_CRD_OCCUPANCY0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000010", }, "M2M.AG1_AD_CRD_OCCUPANCY0.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b01000000", }, "M2M.AG1_AD_CRD_OCCUPANCY0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", 
"Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00001000", }, "M2M.AG1_AD_CRD_OCCUPANCY0.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b10000000", }, "M2M.AG1_AD_CRD_OCCUPANCY0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00100000", }, "M2M.AG1_AD_CRD_OCCUPANCY0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00010000", }, "M2M.AG1_AD_CRD_OCCUPANCY1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", }, "M2M.AG1_AD_CRD_OCCUPANCY1.TGR8": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", "Umask": "b00000001", }, "M2M.AG1_AD_CRD_OCCUPANCY1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000010", }, "M2M.AG1_AD_CRD_OCCUPANCY1.TGR10": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000100", }, "M2M.AG1_BL_CRD_ACQUIRED0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", }, "M2M.AG1_BL_CRD_ACQUIRED0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.AG1_BL_CRD_ACQUIRED0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.AG1_BL_CRD_ACQUIRED0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.AG1_BL_CRD_ACQUIRED0.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress 
Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2M.AG1_BL_CRD_ACQUIRED0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.AG1_BL_CRD_ACQUIRED0.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.AG1_BL_CRD_ACQUIRED0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.AG1_BL_CRD_ACQUIRED0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.AG1_BL_CRD_ACQUIRED1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2M.AG1_BL_CRD_ACQUIRED1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2M.AG1_BL_CRD_ACQUIRED1.TGR10": { "Box": "M2M", "Category": "M2M CMS 
Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2M.AG1_BL_CRD_ACQUIRED1.TGR8": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2M.AG1_BL_CRD_OCCUPANCY0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", }, "M2M.AG1_BL_CRD_OCCUPANCY0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000100", }, "M2M.AG1_BL_CRD_OCCUPANCY0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000001", }, "M2M.AG1_BL_CRD_OCCUPANCY0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000010", }, "M2M.AG1_BL_CRD_OCCUPANCY0.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b01000000", }, "M2M.AG1_BL_CRD_OCCUPANCY0.TGR7": { "Box": "M2M", "Category": "M2M CMS 
Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b10000000", }, "M2M.AG1_BL_CRD_OCCUPANCY0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00001000", }, "M2M.AG1_BL_CRD_OCCUPANCY0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00010000", }, "M2M.AG1_BL_CRD_OCCUPANCY0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00100000", }, "M2M.AG1_BL_CRD_OCCUPANCY1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2M.AG1_BL_CRD_OCCUPANCY1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000010", }, "M2M.AG1_BL_CRD_OCCUPANCY1.TGR10": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000100", }, "M2M.AG1_BL_CRD_OCCUPANCY1.TGR8": 
{ "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000001", }, "M2M.BYPASS_M2M_EGRESS": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "M2M to iMC Bypass", "EvSel": 34, "ExtSel": "", }, "M2M.BYPASS_M2M_EGRESS.NOT_TAKEN": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "M2M to iMC Bypass", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.BYPASS_M2M_EGRESS.TAKEN": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "M2M to iMC Bypass", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.BYPASS_M2M_INGRESS": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Counters": "0-3", "Desc": "M2M to iMC Bypass", "EvSel": 33, "ExtSel": "", }, "M2M.BYPASS_M2M_INGRESS.NOT_TAKEN": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Counters": "0-3", "Desc": "M2M to iMC Bypass", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.BYPASS_M2M_INGRESS.TAKEN": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Counters": "0-3", "Desc": "M2M to iMC Bypass", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.CLOCKTICKS": { "Box": "M2M", "Category": "M2M UCLK Events", "Desc": "Clockticks of the mesh to memory (M2M)", "EvSel": 0, "ExtSel": "", }, "M2M.CMS_CLOCKTICKS": { "Box": "M2M", "Category": "M2M Misc Events", "Desc": "CMS Clockticks", "EvSel": 192, "ExtSel": "", }, "M2M.DIRECT2CORE_NOT_TAKEN_DIRSTATE": { "Box": "M2M", "Category": "M2M DIRECT2CORE Events", "Counters": "0-3", "Desc": "Cycles when direct to core mode, which bypasses the CHA, was disabled", "EvSel": 36, "ExtSel": "", }, "M2M.DIRECT2CORE_NOT_TAKEN_NOTFORKED": { "Box": "M2M", "Category": "M2M DIRECT2CORE Events", "Counters": "0-3", "EvSel": 96, "ExtSel": "", }, "M2M.DIRECT2CORE_TXN_OVERRIDE": { "Box": "M2M", "Category": "M2M DIRECT2CORE 
Events", "Counters": "0-3", "Desc": "Number of reads in which direct to core transaction was overridden", "EvSel": 37, "ExtSel": "", }, "M2M.DIRECT2UPI_NOT_TAKEN_CREDITS": { "Box": "M2M", "Category": "M2M DIRECT2UPI Events", "Desc": "Number of reads in which direct to Intel UPI transactions were overridden", "EvSel": 40, "ExtSel": "", }, "M2M.DIRECT2UPI_NOT_TAKEN_DIRSTATE": { "Box": "M2M", "Category": "M2M DIRECT2UPI Events", "Desc": "Cycles when Direct2UPI was Disabled", "EvSel": 39, "ExtSel": "", }, "M2M.DIRECT2UPI_TXN_OVERRIDE": { "Box": "M2M", "Category": "M2M DIRECT2UPI Events", "Desc": "Number of times a direct to UPI transaction was overridden.", "EvSel": 41, "ExtSel": "", }, "M2M.DIRECTORY_HIT": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", }, "M2M.DIRECTORY_HIT.CLEAN_P": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bx1xxxxxx", }, "M2M.DIRECTORY_HIT.DIRTY_P": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxx1xx", }, "M2M.DIRECTORY_HIT.DIRTY_S": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxxx1x", }, "M2M.DIRECTORY_HIT.CLEAN_A": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "b1xxxxxxx", }, "M2M.DIRECTORY_HIT.CLEAN_I": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxx1xxxx", }, 
"M2M.DIRECTORY_HIT.DIRTY_A": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxx1xxx", }, "M2M.DIRECTORY_HIT.CLEAN_S": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxx1xxxxx", }, "M2M.DIRECTORY_HIT.DIRTY_I": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxxxx1", }, "M2M.DIRECTORY_LOOKUP": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Counters": "0-3", "Desc": "Multi-socket cacheline Directory Lookups", "EvSel": 45, "ExtSel": "", }, "M2M.DIRECTORY_LOOKUP.ANY": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Counters": "0-3", "Desc": "Multi-socket cacheline Directory Lookups", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.DIRECTORY_LOOKUP.STATE_S": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Counters": "0-3", "Desc": "Multi-socket cacheline Directory Lookups", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.DIRECTORY_LOOKUP.STATE_I": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Counters": "0-3", "Desc": "Multi-socket cacheline Directory Lookups", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.DIRECTORY_LOOKUP.STATE_A": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Counters": "0-3", "Desc": "Multi-socket cacheline Directory Lookups", "EvSel": 45, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.DIRECTORY_MISS": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", }, "M2M.DIRECTORY_MISS.DIRTY_A": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", 
"Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxx1xxx", }, "M2M.DIRECTORY_MISS.CLEAN_S": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxx1xxxxx", }, "M2M.DIRECTORY_MISS.DIRTY_I": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxxxx1", }, "M2M.DIRECTORY_MISS.CLEAN_A": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "b1xxxxxxx", }, "M2M.DIRECTORY_MISS.DIRTY_S": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxxx1x", }, "M2M.DIRECTORY_MISS.CLEAN_I": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxx1xxxx", }, "M2M.DIRECTORY_MISS.DIRTY_P": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxx1xx", }, "M2M.DIRECTORY_MISS.CLEAN_P": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bx1xxxxxx", }, "M2M.DISTRESS_ASSERTED": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", }, "M2M.DISTRESS_ASSERTED.DPT_NONLOCAL": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": 
"Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.DISTRESS_ASSERTED.VERT": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b00000001", }, "M2M.DISTRESS_ASSERTED.PMM_NONLOCAL": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.DISTRESS_ASSERTED.HORZ": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b00000010", }, "M2M.DISTRESS_ASSERTED.PMM_LOCAL": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.DISTRESS_ASSERTED.DPT_LOCAL": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.DISTRESS_ASSERTED.DPT_STALL_NOCRD": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2M.DISTRESS_ASSERTED.DPT_STALL_IV": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles 
either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.DISTRESS_PMM": { "Box": "M2M", "Category": "M2M Distress Events", "Counters": "0-3", "EvSel": 242, "ExtSel": "", }, "M2M.DISTRESS_PMM_MEMMODE": { "Box": "M2M", "Category": "M2M Distress Events", "Counters": "0-3", "EvSel": 241, "ExtSel": "", }, "M2M.EGRESS_ORDERING": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", }, "M2M.EGRESS_ORDERING.IV_SNOOPGO_UP": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.EGRESS_ORDERING.IV_SNOOPGO_DN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.HORZ_RING_AD_IN_USE": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", }, "M2M.HORZ_RING_AD_IN_USE.RIGHT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.HORZ_RING_AD_IN_USE.LEFT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.HORZ_RING_AD_IN_USE.LEFT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.HORZ_RING_AD_IN_USE.RIGHT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.HORZ_RING_AKC_IN_USE": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", }, "M2M.HORZ_RING_AKC_IN_USE.RIGHT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.HORZ_RING_AKC_IN_USE.RIGHT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.HORZ_RING_AKC_IN_USE.LEFT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.HORZ_RING_AKC_IN_USE.LEFT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.HORZ_RING_AK_IN_USE": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", }, "M2M.HORZ_RING_AK_IN_USE.RIGHT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.HORZ_RING_AK_IN_USE.LEFT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.HORZ_RING_AK_IN_USE.LEFT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.HORZ_RING_AK_IN_USE.RIGHT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.HORZ_RING_BL_IN_USE": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", }, "M2M.HORZ_RING_BL_IN_USE.LEFT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.HORZ_RING_BL_IN_USE.LEFT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.HORZ_RING_BL_IN_USE.RIGHT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.HORZ_RING_BL_IN_USE.RIGHT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.HORZ_RING_IV_IN_USE": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", }, "M2M.HORZ_RING_IV_IN_USE.LEFT": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.HORZ_RING_IV_IN_USE.RIGHT": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.IMC_READS": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", }, "M2M.IMC_READS.CH1_TO_DDR_AS_CACHE": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxx1xxxx", "UmaskExt": "bx1x", }, "M2M.IMC_READS.TO_DDR_AS_MEM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxx1xxx", "UmaskExt": "b111", }, "M2M.IMC_READS.NORMAL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC 
errors not currently included", "Umask": "bxxxxxxx1", "UmaskExt": "b111", }, "M2M.IMC_READS.CH1_FROM_TGR": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bx1xxxxxx", "UmaskExt": "bx1x", }, "M2M.IMC_READS.CH0_TO_PMM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxx1xxxxx", "UmaskExt": "bxx1", }, "M2M.IMC_READS.ALL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxx1xx", "UmaskExt": "b111", }, "M2M.IMC_READS.CH1_ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxxx1x", "UmaskExt": "bx1x", }, "M2M.IMC_READS.CH1_TO_DDR_AS_MEM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxx1xxx", "UmaskExt": "bx1x", }, "M2M.IMC_READS.CH1_ALL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxx1xx", "UmaskExt": "bx1x", }, "M2M.IMC_READS.CH0_FROM_TGR": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bx1xxxxxx", "UmaskExt": "bxx1", }, "M2M.IMC_READS.CH0_TO_DDR_AS_CACHE": { "Box": "M2M", "Category": 
"M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxx1xxxx", "UmaskExt": "bxx1", }, "M2M.IMC_READS.CH1_TO_PMM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxx1xxxxx", "UmaskExt": "bx1x", }, "M2M.IMC_READS.CH0_TO_DDR_AS_MEM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxx1xxx", "UmaskExt": "bxx1", }, "M2M.IMC_READS.CH0_ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxxx1x", "UmaskExt": "bxx1", }, "M2M.IMC_READS.CH0_ALL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxx1xx", "UmaskExt": "bxx1", }, "M2M.IMC_READS.TO_PMM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxx1xxxxx", "UmaskExt": "b111", }, "M2M.IMC_READS.CH1_NORMAL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxxxx1", "UmaskExt": "bx1x", }, "M2M.IMC_READS.CH2_FROM_TGR": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not 
currently included", "Umask": "bx1xxxxxx", "UmaskExt": "b1xx", }, "M2M.IMC_READS.CH0_NORMAL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxxxx1", "UmaskExt": "bxx1", }, "M2M.IMC_READS.TO_DDR_AS_CACHE": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxx1xxxx", "UmaskExt": "b111", }, "M2M.IMC_READS.FROM_TGR": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bx1xxxxxx", "UmaskExt": "b111", }, "M2M.IMC_READS.ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxxx1x", "UmaskExt": "b111", }, "M2M.IMC_WRITES": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", }, "M2M.IMC_WRITES.CH1_FULL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxx1", "UmaskExt": "bx1xxx", }, "M2M.IMC_WRITES.TO_PMM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "b1xxxxxxx", "UmaskExt": "b111xx", }, "M2M.IMC_WRITES.CH0_PARTIAL_ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes 
Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxx1xxx", "UmaskExt": "bxx1xx", }, "M2M.IMC_WRITES.PARTIAL_ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxx1xxx", "UmaskExt": "b111xx", }, "M2M.IMC_WRITES.CH1_FULL_ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxx1xx", "UmaskExt": "bx1xxx", }, "M2M.IMC_WRITES.CH1_PARTIAL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxx1x", "UmaskExt": "bx1xxx", }, "M2M.IMC_WRITES.NI": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxxx", "UmaskExt": "b1111x", }, "M2M.IMC_WRITES.CH1_NI": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxxx", "UmaskExt": "bx1x1x", }, "M2M.IMC_WRITES.TO_DDR_AS_CACHE": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bx1xxxxxx", "UmaskExt": "b111xx", }, "M2M.IMC_WRITES.CH1_TO_DDR_AS_CACHE": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently 
included", "Umask": "bx1xxxxxx", "UmaskExt": "bx1xxx", }, "M2M.IMC_WRITES.CH0_TO_PMM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "b1xxxxxxx", "UmaskExt": "bxx1xx", }, "M2M.IMC_WRITES.FULL_ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxx1xx", "UmaskExt": "b111xx", }, "M2M.IMC_WRITES.CH1_ALL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxx1xxxx", "UmaskExt": "bx1xxx", }, "M2M.IMC_WRITES.CH0_FROM_TGR": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxxx", "UmaskExt": "bxx1x1", }, "M2M.IMC_WRITES.CH0_NI_MISS": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxxx", "UmaskExt": "b1xxxxx", }, "M2M.IMC_WRITES.CH0_TO_DDR_AS_MEM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxx1xxxxx", "UmaskExt": "bxx1xx", }, "M2M.IMC_WRITES.FULL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxx1", "UmaskExt": "b111xx", }, "M2M.IMC_WRITES.PARTIAL": { "Box": "M2M", 
"Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxx1x", "UmaskExt": "b111xx", }, "M2M.IMC_WRITES.CH0_FULL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxx1", "UmaskExt": "bxx1xx", }, "M2M.IMC_WRITES.CH0_FULL_ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxx1xx", "UmaskExt": "bxx1xx", }, "M2M.IMC_WRITES.NI_MISS": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxxx", "UmaskExt": "b111xx", }, "M2M.IMC_WRITES.CH1_PARTIAL_ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxx1xxx", "UmaskExt": "bx1xxx", }, "M2M.IMC_WRITES.FROM_TGR": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxxx", "UmaskExt": "b111x1", }, "M2M.IMC_WRITES.CH0_PARTIAL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxx1x", "UmaskExt": "bxx1xx", }, "M2M.IMC_WRITES.CH0_NI": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": 
"", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxxx", "UmaskExt": "bxx11x", }, "M2M.IMC_WRITES.CH1_FROM_TGR": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxxx", "UmaskExt": "bx1xx1", }, "M2M.IMC_WRITES.TO_DDR_AS_MEM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxx1xxxxx", "UmaskExt": "b111xx", }, "M2M.IMC_WRITES.ALL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxx1xxxx", "UmaskExt": "b111xx", }, "M2M.IMC_WRITES.CH1_TO_DDR_AS_MEM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxx1xxxxx", "UmaskExt": "bx1xxx", }, "M2M.IMC_WRITES.CH1_NI_MISS": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxxx", "UmaskExt": "bx11xx", }, "M2M.IMC_WRITES.CH0_TO_DDR_AS_CACHE": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bx1xxxxxx", "UmaskExt": "bxx1xx", }, "M2M.IMC_WRITES.CH1_TO_PMM": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "b1xxxxxxx", 
"UmaskExt": "bx1xxx", }, "M2M.IMC_WRITES.CH0_ALL": { "Box": "M2M", "Category": "M2M IMC Events", "Counters": "0-3", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxx1xxxx", "UmaskExt": "bxx1xx", }, "M2M.MIRR_WRQ_INSERTS": { "Box": "M2M", "Category": "M2M Mirror WriteQ EVENTS", "Desc": "Write Tracker Inserts", "EvSel": 100, "ExtSel": "", }, "M2M.MIRR_WRQ_OCCUPANCY": { "Box": "M2M", "Category": "M2M Mirror WriteQ EVENTS", "Desc": "Write Tracker Occupancy", "EvSel": 101, "ExtSel": "", }, "M2M.MISC_EXTERNAL": { "Box": "M2M", "Category": "M2M External Misc Events (eg. From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", }, "M2M.MISC_EXTERNAL.MBE_INST1": { "Box": "M2M", "Category": "M2M External Misc Events (eg. From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", "Umask": "bxxxxxx1x", }, "M2M.MISC_EXTERNAL.MBE_INST0": { "Box": "M2M", "Category": "M2M External Misc Events (eg. 
From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", "Umask": "bxxxxxxx1", }, "M2M.PKT_MATCH": { "Box": "M2M", "Category": "M2M PACKET MATCH Events", "Desc": "Number Packet Header Matches", "EvSel": 76, "ExtSel": "", }, "M2M.PKT_MATCH.MC": { "Box": "M2M", "Category": "M2M PACKET MATCH Events", "Desc": "Number Packet Header Matches", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.PKT_MATCH.MESH": { "Box": "M2M", "Category": "M2M PACKET MATCH Events", "Desc": "Number Packet Header Matches", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.PREFCAM_CIS_DROPS": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 115, "ExtSel": "", }, "M2M.PREFCAM_CYCLES_FULL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Cycles Full", "EvSel": 107, "ExtSel": "", }, "M2M.PREFCAM_CYCLES_FULL.CH2": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Cycles Full", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.PREFCAM_CYCLES_FULL.CH1": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Cycles Full", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.PREFCAM_CYCLES_FULL.ALLCH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Cycles Full", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxx111", }, "M2M.PREFCAM_CYCLES_FULL.CH0": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Cycles Full", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.PREFCAM_CYCLES_NE": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Cycles Not Empty", "EvSel": 108, "ExtSel": "", }, "M2M.PREFCAM_CYCLES_NE.CH2": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Cycles Not Empty", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.PREFCAM_CYCLES_NE.CH1": { "Box": "M2M", "Category": "M2M 
Prefetch CAM Events", "Desc": "Prefetch CAM Cycles Not Empty", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.PREFCAM_CYCLES_NE.ALLCH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Cycles Not Empty", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxx111", }, "M2M.PREFCAM_CYCLES_NE.CH0": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Cycles Not Empty", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.PREFCAM_DEALLOCS": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", }, "M2M.PREFCAM_DEALLOCS.CH2_MISS_INVAL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxx1xx", }, "M2M.PREFCAM_DEALLOCS.CH2_RSP_PDRESET": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxx1xxx", }, "M2M.PREFCAM_DEALLOCS.CH0_HITA0_INVAL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DEALLOCS.CH0_HITA1_INVAL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DEALLOCS.CH1_MISS_INVAL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "bx1xxxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DEALLOCS.CH1_RSP_PDRESET": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "b1xxxxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DEALLOCS.CH2_HITA1_INVAL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, 
"ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxx1x", }, "M2M.PREFCAM_DEALLOCS.CH1_HITA0_INVAL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "bxxx1xxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DEALLOCS.CH1_HITA1_INVAL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "bxx1xxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DEALLOCS.CH2_HITA0_INVAL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxx1", }, "M2M.PREFCAM_DEALLOCS.CH0_RSP_PDRESET": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "bxxxx1xxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DEALLOCS.CH0_MISS_INVAL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Deallocs", "EvSel": 110, "ExtSel": "", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DEMAND_DROPS": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped", "EvSel": 111, "ExtSel": "", }, "M2M.PREFCAM_DEMAND_DROPS.CH0_XPT": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped", "EvSel": 111, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.PREFCAM_DEMAND_DROPS.CH0_UPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped", "EvSel": 111, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.PREFCAM_DEMAND_DROPS.CH2_UPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped", "EvSel": 111, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.PREFCAM_DEMAND_DROPS.UPI_ALLCH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped", "EvSel": 111, "ExtSel": "", "Umask": "b000101010", }, "M2M.PREFCAM_DEMAND_DROPS.XPT_ALLCH": 
{ "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped", "EvSel": 111, "ExtSel": "", "Umask": "b000010101", }, "M2M.PREFCAM_DEMAND_DROPS.CH2_XPT": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped", "EvSel": 111, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.PREFCAM_DEMAND_DROPS.CH1_XPT": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped", "EvSel": 111, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.PREFCAM_DEMAND_DROPS.CH1_UPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped", "EvSel": 111, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.PREFCAM_DEMAND_MERGE": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Demands Merged with CAMed Prefetches", "EvSel": 116, "ExtSel": "", }, "M2M.PREFCAM_DEMAND_MERGE.CH2_XPTUPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Demands Merged with CAMed Prefetches", "EvSel": 116, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.PREFCAM_DEMAND_MERGE.XPTUPI_ALLCH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Demands Merged with CAMed Prefetches", "EvSel": 116, "ExtSel": "", "Umask": "b000010101", }, "M2M.PREFCAM_DEMAND_MERGE.CH1_XPTUPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Demands Merged with CAMed Prefetches", "EvSel": 116, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.PREFCAM_DEMAND_MERGE.CH0_XPTUPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Demands Merged with CAMed Prefetches", "EvSel": 116, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.PREFCAM_DEMAND_NO_MERGE": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Demands Not Merged with CAMed Prefetches", "EvSel": 117, "ExtSel": "", }, "M2M.PREFCAM_DEMAND_NO_MERGE.CH0_XPTUPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Demands Not Merged with CAMed Prefetches", "EvSel": 117, "ExtSel": "", "Umask": "bxxxxxxx1", }, 
"M2M.PREFCAM_DEMAND_NO_MERGE.CH1_XPTUPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Demands Not Merged with CAMed Prefetches", "EvSel": 117, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.PREFCAM_DEMAND_NO_MERGE.XPTUPI_ALLCH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Demands Not Merged with CAMed Prefetches", "EvSel": 117, "ExtSel": "", "Umask": "b000010101", }, "M2M.PREFCAM_DEMAND_NO_MERGE.CH2_XPTUPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Demands Not Merged with CAMed Prefetches", "EvSel": 117, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.PREFCAM_DROP_REASONS_CH0": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", }, "M2M.PREFCAM_DROP_REASONS_CH0.PF_AD_CRD": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "bxx1xxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH0.ERRORBLK_RxC": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "bxxx1xxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH0.XPT_THRESH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxx1x", }, "M2M.PREFCAM_DROP_REASONS_CH0.UPI_THRESH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxx1xx", }, "M2M.PREFCAM_DROP_REASONS_CH0.RPQ_PROXY": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxx1", }, "M2M.PREFCAM_DROP_REASONS_CH0.WPQ_PROXY": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", 
"Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "b1xxxxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH0.PF_CAM_HIT": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH0.STOP_B2B": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "bxxxx1xxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH0.PF_CAM_FULL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "bx1xxxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH0.NOT_PF_SAD_REGION": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH0.PF_SECURE_DROP": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch0 - Reasons", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH1": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", }, "M2M.PREFCAM_DROP_REASONS_CH1.NOT_PF_SAD_REGION": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH1.PF_SECURE_DROP": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH1.WPQ_PROXY": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": 
"Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "b1xxxxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH1.PF_CAM_HIT": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH1.STOP_B2B": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "bxxxx1xxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH1.PF_CAM_FULL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "bx1xxxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH1.ERRORBLK_RxC": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "bxxx1xxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH1.UPI_THRESH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxx1xx", }, "M2M.PREFCAM_DROP_REASONS_CH1.XPT_THRESH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxx1x", }, "M2M.PREFCAM_DROP_REASONS_CH1.RPQ_PROXY": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxx1", }, "M2M.PREFCAM_DROP_REASONS_CH1.PF_AD_CRD": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch1 - Reasons", "EvSel": 113, "ExtSel": "", "Umask": "bxx1xxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH2": { "Box": "M2M", "Category": "M2M Prefetch CAM 
Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", }, "M2M.PREFCAM_DROP_REASONS_CH2.NOT_PF_SAD_REGION": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxx1x", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH2.PF_SECURE_DROP": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxxx1", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH2.STOP_B2B": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "bxxxx1xxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH2.PF_CAM_HIT": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxx1xx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH2.WPQ_PROXY": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "b1xxxxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH2.PF_CAM_FULL": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "bx1xxxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH2.XPT_THRESH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxx1x", }, "M2M.PREFCAM_DROP_REASONS_CH2.UPI_THRESH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxx1xx", }, "M2M.PREFCAM_DROP_REASONS_CH2.ERRORBLK_RxC": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", 
"Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "bxxx1xxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_DROP_REASONS_CH2.RPQ_PROXY": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxxxx", "UmaskExt": "bxxxxxxx1", }, "M2M.PREFCAM_DROP_REASONS_CH2.PF_AD_CRD": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Data Prefetches Dropped Ch2 - Reasons", "EvSel": 114, "ExtSel": "", "Umask": "bxx1xxxxx", "UmaskExt": "bxxxxxxxx", }, "M2M.PREFCAM_INSERTS": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Inserts", "EvSel": 109, "ExtSel": "", }, "M2M.PREFCAM_INSERTS.CH1_XPT": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Inserts", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.PREFCAM_INSERTS.CH1_UPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Inserts", "EvSel": 109, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.PREFCAM_INSERTS.CH2_UPI": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Inserts", "EvSel": 109, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.PREFCAM_INSERTS.UPI_ALLCH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Inserts", "EvSel": 109, "ExtSel": "", "Umask": "b000101010", }, "M2M.PREFCAM_INSERTS.XPT_ALLCH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Inserts", "EvSel": 109, "ExtSel": "", "Umask": "b000010101", }, "M2M.PREFCAM_INSERTS.CH2_XPT": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Inserts", "EvSel": 109, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.PREFCAM_INSERTS.CH0_XPT": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Inserts", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.PREFCAM_INSERTS.CH0_UPI": { "Box": "M2M", "Category": "M2M Prefetch CAM 
Events", "Desc": "Prefetch CAM Inserts", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.PREFCAM_OCCUPANCY": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Occupancy", "EvSel": 106, "ExtSel": "", }, "M2M.PREFCAM_OCCUPANCY.CH2": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Occupancy", "EvSel": 106, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.PREFCAM_OCCUPANCY.ALLCH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Occupancy", "EvSel": 106, "ExtSel": "", "Umask": "bxxxxx111", }, "M2M.PREFCAM_OCCUPANCY.CH1": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Occupancy", "EvSel": 106, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.PREFCAM_OCCUPANCY.CH0": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "Desc": "Prefetch CAM Occupancy", "EvSel": 106, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.PREFCAM_RESP_MISS": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 118, "ExtSel": "", }, "M2M.PREFCAM_RESP_MISS.CH1": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 118, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.PREFCAM_RESP_MISS.ALLCH": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 118, "ExtSel": "", "Umask": "bxxxxx111", }, "M2M.PREFCAM_RESP_MISS.CH0": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 118, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.PREFCAM_RESP_MISS.CH2": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 118, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.PREFCAM_RxC_CYCLES_NE": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 121, "ExtSel": "", }, "M2M.PREFCAM_RxC_DEALLOCS": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 122, "ExtSel": "", }, "M2M.PREFCAM_RxC_DEALLOCS.SQUASHED": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 122, "ExtSel": "", "Umask": "bxxxxxxx1", }, 
"M2M.PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 122, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.PREFCAM_RxC_DEALLOCS.1LM_POSTED": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 122, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.PREFCAM_RxC_DEALLOCS.CIS": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 122, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.PREFCAM_RxC_INSERTS": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 120, "ExtSel": "", }, "M2M.PREFCAM_RxC_OCCUPANCY": { "Box": "M2M", "Category": "M2M Prefetch CAM Events", "EvSel": 119, "ExtSel": "", }, "M2M.RING_BOUNCES_HORZ": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", }, "M2M.RING_BOUNCES_HORZ.IV": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.RING_BOUNCES_HORZ.BL": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.RING_BOUNCES_HORZ.AK": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.RING_BOUNCES_HORZ.AD": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring 
that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.RING_BOUNCES_VERT": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, }, "M2M.RING_BOUNCES_VERT.AK": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "M2M.RING_BOUNCES_VERT.AKC": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "M2M.RING_BOUNCES_VERT.IV": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "M2M.RING_BOUNCES_VERT.BL": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "M2M.RING_BOUNCES_VERT.AD": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, 
"M2M.RING_SINK_STARVED_HORZ": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", }, "M2M.RING_SINK_STARVED_HORZ.AD": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.RING_SINK_STARVED_HORZ.AK_AG1": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.RING_SINK_STARVED_HORZ.AK": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.RING_SINK_STARVED_HORZ.IV": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.RING_SINK_STARVED_HORZ.BL": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.RING_SINK_STARVED_VERT": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", }, "M2M.RING_SINK_STARVED_VERT.AK": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.RING_SINK_STARVED_VERT.AKC": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.RING_SINK_STARVED_VERT.IV": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.RING_SINK_STARVED_VERT.BL": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", 
"Umask": "bxxxxx1xx", }, "M2M.RING_SINK_STARVED_VERT.AD": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.RING_SRC_THRTL": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Source Throttle", "EvSel": 174, "ExtSel": "", }, "M2M.RPQ_NO_REG_CRD": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Regular", "EvSel": 67, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", }, "M2M.RPQ_NO_REG_CRD.CH2": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Regular", "EvSel": 67, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxx1xx", }, "M2M.RPQ_NO_REG_CRD.CH0": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Regular", "EvSel": 67, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxxx1", }, "M2M.RPQ_NO_REG_CRD.CH1": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Regular", "EvSel": 67, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxx1x", }, "M2M.RPQ_NO_REG_CRD_PMM": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M->iMC RPQ Cycles w/Credits - PMM", "EvSel": 79, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", }, "M2M.RPQ_NO_REG_CRD_PMM.CHN0": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M->iMC RPQ Cycles w/Credits - PMM", "EvSel": 79, "ExtSel": "", "Notes": "To Count # cycles w/o 
credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxxx1", }, "M2M.RPQ_NO_REG_CRD_PMM.CHN1": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M->iMC RPQ Cycles w/Credits - PMM", "EvSel": 79, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxx1x", }, "M2M.RPQ_NO_REG_CRD_PMM.CHN2": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M->iMC RPQ Cycles w/Credits - PMM", "EvSel": 79, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxx1xx", }, "M2M.RPQ_NO_SPEC_CRD": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Special", "EvSel": 68, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", }, "M2M.RPQ_NO_SPEC_CRD.CH0": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Special", "EvSel": 68, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxxx1", }, "M2M.RPQ_NO_SPEC_CRD.CH1": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Special", "EvSel": 68, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxx1x", }, "M2M.RPQ_NO_SPEC_CRD.CH2": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Special", "EvSel": 68, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxx1xx", }, "M2M.RxC_AD_CYCLES_FULL": { "Box": "M2M", "Category": "M2M AD Ingress Events", "Desc": "AD 
Ingress (from CMS) Full", "EvSel": 4, "ExtSel": "", }, "M2M.RxC_AD_CYCLES_NE": { "Box": "M2M", "Category": "M2M AD Ingress Events", "Desc": "AD Ingress (from CMS) Not Empty", "EvSel": 3, "ExtSel": "", }, "M2M.RxC_AD_INSERTS": { "Box": "M2M", "Category": "M2M AD Ingress Events", "Desc": "AD Ingress (from CMS) Allocations", "EvSel": 1, "ExtSel": "", }, "M2M.RxC_AD_OCCUPANCY": { "Box": "M2M", "Category": "M2M AD Ingress Events", "Desc": "AD Ingress (from CMS) Occupancy", "EvSel": 2, "ExtSel": "", }, "M2M.RxC_AD_PREF_OCCUPANCY": { "Box": "M2M", "Category": "M2M AD Ingress Events", "Desc": "AD Ingress (from CMS) Occupancy - Prefetches", "EvSel": 119, "ExtSel": "", }, "M2M.RxC_AK_WR_CMP": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 92, "ExtSel": "", }, "M2M.RxC_BL_CYCLES_FULL": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "BL Ingress (from CMS) Full", "EvSel": 8, "ExtSel": "", }, "M2M.RxC_BL_CYCLES_NE": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "BL Ingress (from CMS) Not Empty", "EvSel": 7, "ExtSel": "", }, "M2M.RxC_BL_INSERTS": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "BL Ingress (from CMS) Allocations", "EvSel": 5, "ExtSel": "", }, "M2M.RxC_BL_OCCUPANCY": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "BL Ingress (from CMS) Occupancy", "EvSel": 6, "ExtSel": "", }, "M2M.RxR_BUSY_STARVED": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. 
Selecting multiple ring types NOT supported", }, "M2M.RxR_BUSY_STARVED.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M2M.RxR_BUSY_STARVED.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M2M.RxR_BUSY_STARVED.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M2M.RxR_BUSY_STARVED.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. 
This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M2M.RxR_BUSY_STARVED.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M2M.RxR_BUSY_STARVED.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M2M.RxR_BYPASS": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", }, "M2M.RxR_BYPASS.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M2M.RxR_BYPASS.AK": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M2M.RxR_BYPASS.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M2M.RxR_BYPASS.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M2M.RxR_BYPASS.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M2M.RxR_BYPASS.AKC_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M2M.RxR_BYPASS.IV": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M2M.RxR_BYPASS.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M2M.RxR_BYPASS.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M2M.RxR_CRD_STARVED": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", }, "M2M.RxR_CRD_STARVED.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M2M.RxR_CRD_STARVED.AK": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M2M.RxR_CRD_STARVED.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M2M.RxR_CRD_STARVED.IFV": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M2M.RxR_CRD_STARVED.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M2M.RxR_CRD_STARVED.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M2M.RxR_CRD_STARVED.IV": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M2M.RxR_CRD_STARVED.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M2M.RxR_CRD_STARVED.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M2M.RxR_CRD_STARVED_1": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 228, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", }, "M2M.RxR_INSERTS": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "M2M.RxR_INSERTS.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M2M.RxR_INSERTS.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M2M.RxR_INSERTS.AKC_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M2M.RxR_INSERTS.IV": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M2M.RxR_INSERTS.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M2M.RxR_INSERTS.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M2M.RxR_INSERTS.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M2M.RxR_INSERTS.AK": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M2M.RxR_INSERTS.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M2M.RxR_OCCUPANCY": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "M2M.RxR_OCCUPANCY.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M2M.RxR_OCCUPANCY.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M2M.RxR_OCCUPANCY.AK": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M2M.RxR_OCCUPANCY.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M2M.RxR_OCCUPANCY.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00100000", }, "M2M.RxR_OCCUPANCY.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M2M.RxR_OCCUPANCY.AKC_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M2M.RxR_OCCUPANCY.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M2M.RxR_OCCUPANCY.IV": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M2M.SCOREBOARD_AD_RETRY_ACCEPTS": { "Box": "M2M", "Category": "M2M Scoreboard Events", "EvSel": 51, "ExtSel": "", }, "M2M.SCOREBOARD_AD_RETRY_REJECTS": { "Box": "M2M", "Category": "M2M Scoreboard Events", "EvSel": 52, "ExtSel": "", }, "M2M.SCOREBOARD_BL_RETRY_ACCEPTS": { "Box": "M2M", "Category": "M2M Scoreboard Events", "Desc": "Retry - Mem Mirroring Mode", "EvSel": 53, "ExtSel": "", }, "M2M.SCOREBOARD_BL_RETRY_REJECTS": { "Box": "M2M", "Category": "M2M Scoreboard Events", "Desc": "Retry - Mem Mirroring Mode", "EvSel": 54, "ExtSel": "", }, "M2M.SCOREBOARD_RD_ACCEPTS": { "Box": "M2M", "Category": "M2M Scoreboard Events", "Desc": "Scoreboard Accepts", "EvSel": 47, "ExtSel": "", }, "M2M.SCOREBOARD_RD_REJECTS": { "Box": "M2M", "Category": "M2M Scoreboard Events", "Desc": "Scoreboard Rejects", "EvSel": 48, "ExtSel": "", }, "M2M.SCOREBOARD_WR_ACCEPTS": { "Box": "M2M", "Category": "M2M Scoreboard Events", "Desc": "Scoreboard Accepts", "EvSel": 49, "ExtSel": "", }, "M2M.SCOREBOARD_WR_REJECTS": 
{ "Box": "M2M", "Category": "M2M Scoreboard Events", "Desc": "Scoreboard Rejects", "EvSel": 50, "ExtSel": "", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become 
available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a 
TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD 
Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": 
"Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7": { "Box": "M2M", "Category": "M2M CMS Transgress 
Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0": { 
"Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.STALL1_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2M.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2M.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2M.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2M.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become 
available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2M.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2M.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2M.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2M.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2M.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used 
by ICX.", "Umask": "bxxxxxxx1", }, "M2M.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2M.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2M.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2M.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2M.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2M.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10": { "Box": "M2M", "Category": "M2M 
CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2M.TAG_HIT": { "Box": "M2M", "Category": "M2M Directory State Events", "Defn": "Tag Hit indicates when a request sent to the iMC hit in Near Memory.", "Desc": "Tag Hit", "EvSel": 44, "ExtSel": "", "Notes": "When the iMC returns data, the response's tag is compared against request tag, A hit indicates to M2M the data was found in NearMem.", }, "M2M.TAG_HIT.NM_UFILL_HIT_CLEAN": { "Box": "M2M", "Category": "M2M Directory State Events", "Defn": "Tag Hit indicates when a request sent to the iMC hit in Near Memory.", "Desc": "Tag Hit", "EvSel": 44, "ExtSel": "", "Notes": "When the iMC returns data, the response's tag is compared against request tag, A hit indicates to M2M the data was found in NearMem.", "Umask": "bxxxxx1xx", }, "M2M.TAG_HIT.NM_UFILL_HIT_DIRTY": { "Box": "M2M", "Category": "M2M Directory State Events", "Defn": "Tag Hit indicates when a request sent to the iMC hit in Near Memory.", "Desc": "Tag Hit", "EvSel": 44, "ExtSel": "", "Notes": "When the iMC returns data, the response's tag is compared against request tag, A hit indicates to M2M the data was found in NearMem.", "Umask": "bxxxx1xxx", }, "M2M.TAG_HIT.NM_RD_HIT_DIRTY": { "Box": "M2M", "Category": "M2M Directory State Events", "Defn": "Tag Hit indicates when a request sent to the iMC hit in Near Memory.", "Desc": "Tag Hit", "EvSel": 44, "ExtSel": "", "Notes": "When the iMC returns data, the response's tag is compared against request tag, A hit indicates to M2M the data was found in NearMem.", "Umask": "bxxxxxx1x", }, "M2M.TAG_HIT.NM_RD_HIT_CLEAN": { "Box": "M2M", "Category": "M2M Directory State Events", "Defn": "Tag Hit indicates when a request sent to the iMC hit in Near Memory.", "Desc": "Tag 
Hit", "EvSel": 44, "ExtSel": "", "Notes": "When the iMC returns data, the response's tag is compared against request tag, A hit indicates to M2M the data was found in NearMem.", "Umask": "bxxxxxxx1", }, "M2M.TAG_MISS": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Tag Miss", "EvSel": 97, "ExtSel": "", }, "M2M.TGR_AD_CREDITS": { "Box": "M2M", "Category": "M2M Transgress Credit Events", "Desc": "Number AD Ingress Credits", "EvSel": 65, "ExtSel": "", }, "M2M.TGR_BL_CREDITS": { "Box": "M2M", "Category": "M2M Transgress Credit Events", "Desc": "Number BL Ingress Credits", "EvSel": 66, "ExtSel": "", }, "M2M.TRACKER_FULL": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Full", "EvSel": 69, "ExtSel": "", }, "M2M.TRACKER_FULL.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Full", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TRACKER_FULL.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Full", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TRACKER_FULL.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Full", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TRACKER_INSERTS": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Inserts", "EvSel": 73, "ExtSel": "", }, "M2M.TRACKER_INSERTS.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Inserts", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TRACKER_INSERTS.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Inserts", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TRACKER_INSERTS.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Inserts", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TRACKER_NE": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Not Empty", "EvSel": 70, "ExtSel": "", }, "M2M.TRACKER_NE.CH2": { "Box": "M2M", 
"Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Not Empty", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TRACKER_NE.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Not Empty", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TRACKER_NE.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Not Empty", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TRACKER_OCCUPANCY": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Occupancy", "EvSel": 71, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", }, "M2M.TRACKER_OCCUPANCY.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Occupancy", "EvSel": 71, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bxxxxxx1x", }, "M2M.TRACKER_OCCUPANCY.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Occupancy", "EvSel": 71, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bxxxxxxx1", }, "M2M.TRACKER_OCCUPANCY.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Occupancy", "EvSel": 71, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bxxxxx1xx", }, "M2M.TxC_AD_CREDITS_ACQUIRED": { "Box": "M2M", "Category": "M2M AD CMS/Mesh Egress Credit Events", "Desc": "AD Egress (to CMS) Credit Acquired", "EvSel": 13, "ExtSel": "", "Notes": "Not a per channel event?", }, "M2M.TxC_AD_CREDIT_OCCUPANCY": { "Box": "M2M", "Category": "M2M AD CMS/Mesh Egress Credit Events", "Desc": "AD Egress (to CMS) Credits Occupancy", "EvSel": 14, "ExtSel": "", "Notes": "Not a per channel event?", }, "M2M.TxC_AD_CYCLES_FULL": { "Box": "M2M", "Category": "M2M AD Egress Events", "Desc": "AD Egress (to CMS) Full", "EvSel": 12, "ExtSel": "", "Notes": "Not a per channel event?", }, "M2M.TxC_AD_CYCLES_NE": { "Box": "M2M", "Category": "M2M AD Egress Events", "Desc": "AD Egress (to CMS) Not Empty", "EvSel": 11, "ExtSel": "", "Notes": "Not a per channel event?", }, 
"M2M.TxC_AD_INSERTS": { "Box": "M2M", "Category": "M2M AD Egress Events", "Desc": "AD Egress (to CMS) Allocations", "EvSel": 9, "ExtSel": "", "Notes": "Not a per channel event?", }, "M2M.TxC_AD_NO_CREDIT_CYCLES": { "Box": "M2M", "Category": "M2M AD CMS/Mesh Egress Credit Events", "Desc": "Cycles with No AD Egress (to CMS) Credits", "EvSel": 15, "ExtSel": "", "Notes": "Not a per channel event?", }, "M2M.TxC_AD_NO_CREDIT_STALLED": { "Box": "M2M", "Category": "M2M AD CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No AD Egress (to CMS) Credits", "EvSel": 16, "ExtSel": "", "Notes": "Not a per channel event?", }, "M2M.TxC_AD_OCCUPANCY": { "Box": "M2M", "Category": "M2M AD Egress Events", "Desc": "AD Egress (to CMS) Occupancy", "EvSel": 10, "ExtSel": "", "Notes": "Not a per channel event?", }, "M2M.TxC_AK": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound Ring Transactions on AK", "EvSel": 57, "ExtSel": "", }, "M2M.TxC_AK.CRD_CBO": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound Ring Transactions on AK", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK.NDR": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound Ring Transactions on AK", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AKC_CREDITS": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AKC Credits", "EvSel": 95, "ExtSel": "", }, "M2M.TxC_AK_CREDITS_ACQUIRED": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "AK Egress (to CMS) Credit Acquired", "EvSel": 29, "ExtSel": "", "Notes": "only on g_chnl==0?", }, "M2M.TxC_AK_CREDITS_ACQUIRED.CMS1": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "AK Egress (to CMS) Credit Acquired", "EvSel": 29, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_CREDITS_ACQUIRED.CMS0": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "AK Egress (to CMS) Credit 
Acquired", "EvSel": 29, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_CYCLES_FULL": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", }, "M2M.TxC_AK_CYCLES_FULL.CMS1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_CYCLES_FULL.CMS0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_CYCLES_FULL.WRCRD0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b0xx1xxxx", }, "M2M.TxC_AK_CYCLES_FULL.WRCMP0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b0x1xxxxx", }, "M2M.TxC_AK_CYCLES_FULL.RDCRD0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits 
for each CMS agent, but the other related events don't?", "Umask": "b0xxx1xxx", }, "M2M.TxC_AK_CYCLES_FULL.WRCMP1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b1x1xxxxx", }, "M2M.TxC_AK_CYCLES_FULL.RDCRD1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b1xxx1xxx", }, "M2M.TxC_AK_CYCLES_FULL.ALL": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "bxxxxxx11", }, "M2M.TxC_AK_CYCLES_FULL.WRCRD1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b1xx1xxxx", }, "M2M.TxC_AK_CYCLES_NE": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", }, "M2M.TxC_AK_CYCLES_NE.WRCMP": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxC_AK_CYCLES_NE.RDCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxC_AK_CYCLES_NE.ALL": { "Box": "M2M", "Category": "M2M AK Egress 
Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx11", }, "M2M.TxC_AK_CYCLES_NE.CMS0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_CYCLES_NE.WRCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxC_AK_CYCLES_NE.CMS1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_INSERTS": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", }, "M2M.TxC_AK_INSERTS.RDCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxC_AK_INSERTS.PREF_RD_CAM_HIT": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxC_AK_INSERTS.WRCMP": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxC_AK_INSERTS.CMS0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_INSERTS.WRCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxC_AK_INSERTS.CMS1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_INSERTS.ALL": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxx11", }, 
"M2M.TxC_AK_NO_CREDIT_CYCLES": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles with No AK Egress (to CMS) Credits", "EvSel": 31, "ExtSel": "", "Notes": "only on g_chnl==0?", }, "M2M.TxC_AK_NO_CREDIT_CYCLES.CMS1": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles with No AK Egress (to CMS) Credits", "EvSel": 31, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_NO_CREDIT_CYCLES.CMS0": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles with No AK Egress (to CMS) Credits", "EvSel": 31, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_NO_CREDIT_STALLED": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No AK Egress (to CMS) Credits", "EvSel": 32, "ExtSel": "", "Notes": "only on g_chnl==0?", }, "M2M.TxC_AK_NO_CREDIT_STALLED.CMS1": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No AK Egress (to CMS) Credits", "EvSel": 32, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_NO_CREDIT_STALLED.CMS0": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No AK Egress (to CMS) Credits", "EvSel": 32, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_OCCUPANCY": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", }, "M2M.TxC_AK_OCCUPANCY.RDCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxC_AK_OCCUPANCY.WRCMP": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxC_AK_OCCUPANCY.CMS1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK 
Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_OCCUPANCY.WRCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxC_AK_OCCUPANCY.CMS0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_OCCUPANCY.ALL": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx11", }, "M2M.TxC_BL": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 64, "ExtSel": "", }, "M2M.TxC_BL.DRS_UPI": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxC_BL.DRS_CORE": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL.DRS_CACHE": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_CREDITS_ACQUIRED": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "BL Egress (to CMS) Credit Acquired", "EvSel": 25, "ExtSel": "", "Notes": "only on g_chnl==0?", }, "M2M.TxC_BL_CREDITS_ACQUIRED.CMS0": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "BL Egress (to CMS) Credit Acquired", "EvSel": 25, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_CREDITS_ACQUIRED.CMS1": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "BL Egress (to CMS) Credit Acquired", "EvSel": 25, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_CYCLES_FULL": { "Box": 
"M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Full", "EvSel": 24, "ExtSel": "", "Notes": "only on g_chnl==0?", }, "M2M.TxC_BL_CYCLES_FULL.ALL": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Full", "EvSel": 24, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxx11", }, "M2M.TxC_BL_CYCLES_FULL.CMS1": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Full", "EvSel": 24, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_CYCLES_FULL.CMS0": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Full", "EvSel": 24, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_CYCLES_NE": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Not Empty", "EvSel": 23, "ExtSel": "", "Notes": "only on g_chnl==0?", }, "M2M.TxC_BL_CYCLES_NE.CMS0": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Not Empty", "EvSel": 23, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_CYCLES_NE.CMS1": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Not Empty", "EvSel": 23, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_CYCLES_NE.ALL": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Not Empty", "EvSel": 23, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxx11", }, "M2M.TxC_BL_INSERTS": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Allocations", "EvSel": 21, "ExtSel": "", }, "M2M.TxC_BL_INSERTS.CMS0": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_INSERTS.CMS1": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Allocations", "EvSel": 21, "ExtSel": "", 
"Umask": "bxxxxxx1x", }, "M2M.TxC_BL_INSERTS.ALL": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxx11", }, "M2M.TxC_BL_NO_CREDIT_CYCLES": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles with No BL Egress (to CMS) Credits", "EvSel": 27, "ExtSel": "", "Notes": "only on g_chnl==0?", }, "M2M.TxC_BL_NO_CREDIT_CYCLES.CMS0": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles with No BL Egress (to CMS) Credits", "EvSel": 27, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_NO_CREDIT_CYCLES.CMS1": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles with No BL Egress (to CMS) Credits", "EvSel": 27, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_NO_CREDIT_STALLED": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No BL Egress (to CMS) Credits", "EvSel": 28, "ExtSel": "", "Notes": "only on g_chnl==0?", }, "M2M.TxC_BL_NO_CREDIT_STALLED.CMS0": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No BL Egress (to CMS) Credits", "EvSel": 28, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_NO_CREDIT_STALLED.CMS1": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No BL Egress (to CMS) Credits", "EvSel": 28, "ExtSel": "", "Notes": "only on g_chnl==0?", "Umask": "bxxxxxx1x", }, "M2M.TxR_HORZ_ADS_USED": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", }, "M2M.TxR_HORZ_ADS_USED.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets 
using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b01000100", }, "M2M.TxR_HORZ_ADS_USED.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00010000", }, "M2M.TxR_HORZ_ADS_USED.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00000001", }, "M2M.TxR_HORZ_ADS_USED.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00000100", }, "M2M.TxR_HORZ_ADS_USED.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b01000000", }, "M2M.TxR_HORZ_ADS_USED.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00010001", }, "M2M.TxR_HORZ_BYPASS": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", }, "M2M.TxR_HORZ_BYPASS.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", 
"Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000001", }, "M2M.TxR_HORZ_BYPASS.IV": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00001000", }, "M2M.TxR_HORZ_BYPASS.AKC_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b10000000", }, "M2M.TxR_HORZ_BYPASS.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000100", }, "M2M.TxR_HORZ_BYPASS.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b01000100", }, "M2M.TxR_HORZ_BYPASS.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00010000", }, "M2M.TxR_HORZ_BYPASS.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b01000000", }, "M2M.TxR_HORZ_BYPASS.AK": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS 
Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000010", }, "M2M.TxR_HORZ_BYPASS.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00010001", }, "M2M.TxR_HORZ_CYCLES_FULL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", }, "M2M.TxR_HORZ_CYCLES_FULL.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000001", }, "M2M.TxR_HORZ_CYCLES_FULL.IV": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00001000", }, "M2M.TxR_HORZ_CYCLES_FULL.AKC_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b10000000", }, "M2M.TxR_HORZ_CYCLES_FULL.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000100", }, "M2M.TxR_HORZ_CYCLES_FULL.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00010000", }, "M2M.TxR_HORZ_CYCLES_FULL.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b01000100", }, "M2M.TxR_HORZ_CYCLES_FULL.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00010001", }, "M2M.TxR_HORZ_CYCLES_FULL.AK": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000010", }, "M2M.TxR_HORZ_CYCLES_FULL.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b01000000", }, "M2M.TxR_HORZ_CYCLES_NE": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", }, "M2M.TxR_HORZ_CYCLES_NE.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000100", }, "M2M.TxR_HORZ_CYCLES_NE.AKC_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b10000000", }, "M2M.TxR_HORZ_CYCLES_NE.IV": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00001000", }, "M2M.TxR_HORZ_CYCLES_NE.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000001", }, "M2M.TxR_HORZ_CYCLES_NE.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00010001", }, "M2M.TxR_HORZ_CYCLES_NE.AK": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000010", }, "M2M.TxR_HORZ_CYCLES_NE.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b01000000", }, "M2M.TxR_HORZ_CYCLES_NE.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00010000", }, "M2M.TxR_HORZ_CYCLES_NE.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b01000100", }, "M2M.TxR_HORZ_INSERTS": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", }, "M2M.TxR_HORZ_INSERTS.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b01000100", }, "M2M.TxR_HORZ_INSERTS.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00010000", }, "M2M.TxR_HORZ_INSERTS.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b01000000", }, 
"M2M.TxR_HORZ_INSERTS.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00010001", }, "M2M.TxR_HORZ_INSERTS.AK": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000010", }, "M2M.TxR_HORZ_INSERTS.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000001", }, "M2M.TxR_HORZ_INSERTS.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000100", }, "M2M.TxR_HORZ_INSERTS.AKC_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b10000000", }, "M2M.TxR_HORZ_INSERTS.IV": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to 
queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00001000", }, "M2M.TxR_HORZ_NACK": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", }, "M2M.TxR_HORZ_NACK.AKC_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b10000000", }, "M2M.TxR_HORZ_NACK.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000100", }, "M2M.TxR_HORZ_NACK.IV": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00001000", }, "M2M.TxR_HORZ_NACK.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000001", }, "M2M.TxR_HORZ_NACK.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00010001", }, "M2M.TxR_HORZ_NACK.AK": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000010", }, "M2M.TxR_HORZ_NACK.BL_CRD": { "Box": 
"M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b01000000", }, "M2M.TxR_HORZ_NACK.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00010000", }, "M2M.TxR_HORZ_NACK.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b01000100", }, "M2M.TxR_HORZ_OCCUPANCY": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", }, "M2M.TxR_HORZ_OCCUPANCY.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00010000", }, "M2M.TxR_HORZ_OCCUPANCY.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b01000100", }, "M2M.TxR_HORZ_OCCUPANCY.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up 
requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00010001", }, "M2M.TxR_HORZ_OCCUPANCY.AK": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000010", }, "M2M.TxR_HORZ_OCCUPANCY.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b01000000", }, "M2M.TxR_HORZ_OCCUPANCY.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000001", }, "M2M.TxR_HORZ_OCCUPANCY.AKC_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b10000000", }, "M2M.TxR_HORZ_OCCUPANCY.IV": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00001000", }, "M2M.TxR_HORZ_OCCUPANCY.BL_UNCRD": { "Box": "M2M", "Category": 
"M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000100", }, "M2M.TxR_HORZ_STARVED": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", }, "M2M.TxR_HORZ_STARVED.AKC_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b10000000", }, "M2M.TxR_HORZ_STARVED.BL_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000100", }, "M2M.TxR_HORZ_STARVED.IV": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00001000", }, "M2M.TxR_HORZ_STARVED.AD_UNCRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000001", }, "M2M.TxR_HORZ_STARVED.AK": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000010", }, "M2M.TxR_HORZ_STARVED.AD_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000001", }, "M2M.TxR_HORZ_STARVED.BL_ALL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000100", }, "M2M.TxR_VERT_ADS_USED": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", }, "M2M.TxR_VERT_ADS_USED.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_ADS_USED.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_ADS_USED.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_ADS_USED.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_BYPASS": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", }, "M2M.TxR_VERT_BYPASS.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", 
"Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_BYPASS.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_BYPASS.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_BYPASS.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_BYPASS.IV_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_BYPASS.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_BYPASS.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_BYPASS_1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, 
broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", }, "M2M.TxR_VERT_BYPASS_1.AKC_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_BYPASS_1.AKC_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_CYCLES_FULL0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", }, "M2M.TxR_VERT_CYCLES_FULL0.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_CYCLES_FULL0.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_CYCLES_FULL0.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_CYCLES_FULL0.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_CYCLES_FULL0.IV_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_CYCLES_FULL0.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_CYCLES_FULL0.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_CYCLES_FULL1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", }, "M2M.TxR_VERT_CYCLES_FULL1.AKC_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_CYCLES_FULL1.AKC_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_CYCLES_NE0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", }, "M2M.TxR_VERT_CYCLES_NE0.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_CYCLES_NE0.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_CYCLES_NE0.IV_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_CYCLES_NE0.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_CYCLES_NE0.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_CYCLES_NE0.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_CYCLES_NE0.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_CYCLES_NE1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", }, "M2M.TxR_VERT_CYCLES_NE1.AKC_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_CYCLES_NE1.AKC_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_INSERTS0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", }, "M2M.TxR_VERT_INSERTS0.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_INSERTS0.IV_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_INSERTS0.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_INSERTS0.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_INSERTS0.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_INSERTS0.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_INSERTS0.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_INSERTS1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", }, "M2M.TxR_VERT_INSERTS1.AKC_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_INSERTS1.AKC_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_NACK0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", }, "M2M.TxR_VERT_NACK0.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_NACK0.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_NACK0.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_NACK0.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_NACK0.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_NACK0.IV_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_NACK0.BL_AG0": { "Box": "M2M", "Category": 
"M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_NACK1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", }, "M2M.TxR_VERT_NACK1.AKC_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_NACK1.AKC_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_OCCUPANCY0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", }, "M2M.TxR_VERT_OCCUPANCY0.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_OCCUPANCY0.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_OCCUPANCY0.BL_AG1": { "Box": "M2M", 
"Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_OCCUPANCY0.IV_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_OCCUPANCY0.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_OCCUPANCY0.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_OCCUPANCY0.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_OCCUPANCY1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", }, 
"M2M.TxR_VERT_OCCUPANCY1.AKC_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_OCCUPANCY1.AKC_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_STARVED0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", }, "M2M.TxR_VERT_STARVED0.IV_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_STARVED0.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_STARVED0.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_STARVED0.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_STARVED0.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_STARVED0.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_STARVED0.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_STARVED1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", }, "M2M.TxR_VERT_STARVED1.AKC_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_STARVED1.AKC_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_STARVED1.TGC": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.VERT_RING_AD_IN_USE": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", }, "M2M.VERT_RING_AD_IN_USE.UP_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.VERT_RING_AD_IN_USE.DN_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.VERT_RING_AD_IN_USE.DN_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.VERT_RING_AD_IN_USE.UP_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.VERT_RING_AKC_IN_USE": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", }, "M2M.VERT_RING_AKC_IN_USE.UP_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.VERT_RING_AKC_IN_USE.DN_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.VERT_RING_AKC_IN_USE.DN_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.VERT_RING_AKC_IN_USE.UP_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.VERT_RING_AK_IN_USE": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", }, "M2M.VERT_RING_AK_IN_USE.UP_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.VERT_RING_AK_IN_USE.DN_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.VERT_RING_AK_IN_USE.DN_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.VERT_RING_AK_IN_USE.UP_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.VERT_RING_BL_IN_USE": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", }, "M2M.VERT_RING_BL_IN_USE.DN_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.VERT_RING_BL_IN_USE.UP_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.VERT_RING_BL_IN_USE.UP_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.VERT_RING_BL_IN_USE.DN_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.VERT_RING_IV_IN_USE": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", }, "M2M.VERT_RING_IV_IN_USE.UP": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.VERT_RING_IV_IN_USE.DN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.VERT_RING_TGC_IN_USE": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", }, "M2M.VERT_RING_TGC_IN_USE.DN_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.VERT_RING_TGC_IN_USE.UP_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.VERT_RING_TGC_IN_USE.UP_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.VERT_RING_TGC_IN_USE.DN_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WPQ_FLUSH": { "Box": "M2M", "Category": "M2M WPQ EVENTS", "Desc": "WPQ Flush", "EvSel": 88, "ExtSel": "", }, "M2M.WPQ_FLUSH.CH1": { "Box": "M2M", "Category": "M2M WPQ EVENTS", "Desc": "WPQ Flush", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WPQ_FLUSH.CH0": { "Box": "M2M", "Category": "M2M WPQ EVENTS", "Desc": "WPQ Flush", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WPQ_FLUSH.CH2": { "Box": "M2M", "Category": "M2M WPQ EVENTS", "Desc": "WPQ Flush", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WPQ_NO_REG_CRD": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Regular", "EvSel": 77, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", }, "M2M.WPQ_NO_REG_CRD.CHN0": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Regular", "EvSel": 77, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxxx1", }, "M2M.WPQ_NO_REG_CRD.CHN1": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Regular", "EvSel": 77, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxx1x", }, "M2M.WPQ_NO_REG_CRD.CHN2": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Regular", "EvSel": 77, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxx1xx", }, "M2M.WPQ_NO_REG_CRD_PMM": { "Box": "M2M", "Category": 
"M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - PMM", "EvSel": 81, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", }, "M2M.WPQ_NO_REG_CRD_PMM.CHN2": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - PMM", "EvSel": 81, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxx1xx", }, "M2M.WPQ_NO_REG_CRD_PMM.CHN0": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - PMM", "EvSel": 81, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxxx1", }, "M2M.WPQ_NO_REG_CRD_PMM.CHN1": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - PMM", "EvSel": 81, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxx1x", }, "M2M.WPQ_NO_SPEC_CRD": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Special", "EvSel": 78, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", }, "M2M.WPQ_NO_SPEC_CRD.CHN2": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Special", "EvSel": 78, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxx1xx", }, "M2M.WPQ_NO_SPEC_CRD.CHN0": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Special", "EvSel": 78, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": 
"bxxxxxxx1", }, "M2M.WPQ_NO_SPEC_CRD.CHN1": { "Box": "M2M", "Category": "M2M WPQ CREDIT Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Special", "EvSel": 78, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxx1x", }, "M2M.WR_TRACKER_FULL": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Full", "EvSel": 74, "ExtSel": "", }, "M2M.WR_TRACKER_FULL.CH0": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Full", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WR_TRACKER_FULL.CH1": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Full", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WR_TRACKER_FULL.MIRR": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Full", "EvSel": 74, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.WR_TRACKER_FULL.CH2": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Full", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WR_TRACKER_INSERTS": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Inserts", "EvSel": 86, "ExtSel": "", }, "M2M.WR_TRACKER_INSERTS.CH0": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Inserts", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WR_TRACKER_INSERTS.CH1": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Inserts", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WR_TRACKER_INSERTS.CH2": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Inserts", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WR_TRACKER_NE": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", }, "M2M.WR_TRACKER_NE.CH2": { "Box": 
"M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WR_TRACKER_NE.MIRR_PWR": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.WR_TRACKER_NE.MIRR": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.WR_TRACKER_NE.CH0": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WR_TRACKER_NE.MIRR_NONTGR": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.WR_TRACKER_NE.CH1": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WR_TRACKER_NONPOSTED_INSERTS": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Non-Posted Inserts", "EvSel": 99, "ExtSel": "", }, "M2M.WR_TRACKER_NONPOSTED_INSERTS.CH2": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Non-Posted Inserts", "EvSel": 99, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WR_TRACKER_NONPOSTED_INSERTS.CH0": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Non-Posted Inserts", "EvSel": 99, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WR_TRACKER_NONPOSTED_INSERTS.CH1": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Non-Posted Inserts", "EvSel": 99, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WR_TRACKER_NONPOSTED_OCCUPANCY": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Non-Posted Occupancy", "EvSel": 98, "ExtSel": "", }, "M2M.WR_TRACKER_NONPOSTED_OCCUPANCY.CH2": { "Box": "M2M", 
"Category": "M2M Write Tracker Events", "Desc": "Write Tracker Non-Posted Occupancy", "EvSel": 98, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WR_TRACKER_NONPOSTED_OCCUPANCY.CH1": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Non-Posted Occupancy", "EvSel": 98, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WR_TRACKER_NONPOSTED_OCCUPANCY.CH0": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Non-Posted Occupancy", "EvSel": 98, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WR_TRACKER_OCCUPANCY": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Occupancy", "EvSel": 85, "ExtSel": "", }, "M2M.WR_TRACKER_OCCUPANCY.MIRR": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Occupancy", "EvSel": 85, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.WR_TRACKER_OCCUPANCY.MIRR_PWR": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Occupancy", "EvSel": 85, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.WR_TRACKER_OCCUPANCY.CH1": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Occupancy", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WR_TRACKER_OCCUPANCY.MIRR_NONTGR": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Occupancy", "EvSel": 85, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.WR_TRACKER_OCCUPANCY.CH0": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Occupancy", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WR_TRACKER_OCCUPANCY.CH2": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Occupancy", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WR_TRACKER_POSTED_INSERTS": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Posted Inserts", "EvSel": 94, "ExtSel": "", }, "M2M.WR_TRACKER_POSTED_INSERTS.CH0": { "Box": "M2M", "Category": "M2M Write Tracker Events", 
"Desc": "Write Tracker Posted Inserts", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WR_TRACKER_POSTED_INSERTS.CH1": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Posted Inserts", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WR_TRACKER_POSTED_INSERTS.CH2": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Posted Inserts", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WR_TRACKER_POSTED_OCCUPANCY": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Posted Occupancy", "EvSel": 93, "ExtSel": "", }, "M2M.WR_TRACKER_POSTED_OCCUPANCY.CH2": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Posted Occupancy", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WR_TRACKER_POSTED_OCCUPANCY.CH0": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Posted Occupancy", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WR_TRACKER_POSTED_OCCUPANCY.CH1": { "Box": "M2M", "Category": "M2M Write Tracker Events", "Desc": "Write Tracker Posted Occupancy", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxxx1x", }, # M2PCIe: "M2PCIe.AG0_AD_CRD_ACQUIRED0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "M2PCIe.AG0_AD_CRD_ACQUIRED0.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bx1xxxxxx", }, "M2PCIe.AG0_AD_CRD_ACQUIRED0.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress 
Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "b1xxxxxxx", }, "M2PCIe.AG0_AD_CRD_ACQUIRED0.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "M2PCIe.AG0_AD_CRD_ACQUIRED0.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "M2PCIe.AG0_AD_CRD_ACQUIRED0.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "M2PCIe.AG0_AD_CRD_ACQUIRED0.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "M2PCIe.AG0_AD_CRD_ACQUIRED0.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": 
"", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "M2PCIe.AG0_AD_CRD_ACQUIRED0.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "M2PCIe.AG0_AD_CRD_ACQUIRED1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", }, "M2PCIe.AG0_AD_CRD_ACQUIRED1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2PCIe.AG0_AD_CRD_ACQUIRED1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2PCIe.AG0_AD_CRD_ACQUIRED1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. 
Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY0.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00001000", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY0.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b10000000", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY0.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b01000000", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY0.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00100000", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY0.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD 
credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00010000", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY0.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000100", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY0.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000010", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY0.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000001", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000010", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000100", }, "M2PCIe.AG0_AD_CRD_OCCUPANCY1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", "Umask": "b00000001", }, "M2PCIe.AG0_BL_CRD_ACQUIRED0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", }, "M2PCIe.AG0_BL_CRD_ACQUIRED0.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.AG0_BL_CRD_ACQUIRED0.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.AG0_BL_CRD_ACQUIRED0.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.AG0_BL_CRD_ACQUIRED0.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.AG0_BL_CRD_ACQUIRED0.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.AG0_BL_CRD_ACQUIRED0.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 
136, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.AG0_BL_CRD_ACQUIRED0.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.AG0_BL_CRD_ACQUIRED0.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.AG0_BL_CRD_ACQUIRED1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2PCIe.AG0_BL_CRD_ACQUIRED1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2PCIe.AG0_BL_CRD_ACQUIRED1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2PCIe.AG0_BL_CRD_ACQUIRED1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit 
Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY0.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000100", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY0.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY0.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000010", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY0.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b01000000", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY0.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b10000000", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY0.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00001000", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY0.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", 
"Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00010000", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY0.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00100000", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000001", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000100", }, "M2PCIe.AG0_BL_CRD_OCCUPANCY1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000010", }, "M2PCIe.AG1_AD_CRD_ACQUIRED0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": 
"", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "M2PCIe.AG1_AD_CRD_ACQUIRED0.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "b1xxxxxxx", }, "M2PCIe.AG1_AD_CRD_ACQUIRED0.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "M2PCIe.AG1_AD_CRD_ACQUIRED0.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bx1xxxxxx", }, "M2PCIe.AG1_AD_CRD_ACQUIRED0.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "M2PCIe.AG1_AD_CRD_ACQUIRED0.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "M2PCIe.AG1_AD_CRD_ACQUIRED0.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS 
Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "M2PCIe.AG1_AD_CRD_ACQUIRED0.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "M2PCIe.AG1_AD_CRD_ACQUIRED0.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "M2PCIe.AG1_AD_CRD_ACQUIRED1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", }, "M2PCIe.AG1_AD_CRD_ACQUIRED1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. 
Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2PCIe.AG1_AD_CRD_ACQUIRED1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2PCIe.AG1_AD_CRD_ACQUIRED1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY0.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00100000", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY0.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00010000", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY0.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS 
Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00001000", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY0.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b10000000", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY0.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b01000000", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY0.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000010", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY0.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000001", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY0.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, 
"ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000100", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000100", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000010", }, "M2PCIe.AG1_AD_CRD_OCCUPANCY1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", "Umask": "b00000001", }, "M2PCIe.AG1_BL_CRD_ACQUIRED0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", }, "M2PCIe.AG1_BL_CRD_ACQUIRED0.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.AG1_BL_CRD_ACQUIRED0.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.AG1_BL_CRD_ACQUIRED0.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.AG1_BL_CRD_ACQUIRED0.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.AG1_BL_CRD_ACQUIRED0.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.AG1_BL_CRD_ACQUIRED0.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 
140, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.AG1_BL_CRD_ACQUIRED0.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.AG1_BL_CRD_ACQUIRED0.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.AG1_BL_CRD_ACQUIRED1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2PCIe.AG1_BL_CRD_ACQUIRED1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2PCIe.AG1_BL_CRD_ACQUIRED1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2PCIe.AG1_BL_CRD_ACQUIRED1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit 
Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY0.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000100", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY0.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY0.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000010", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY0.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b01000000", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY0.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00001000", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY0.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b10000000", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY0.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", 
"Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00100000", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY0.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00010000", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000001", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000100", }, "M2PCIe.AG1_BL_CRD_OCCUPANCY1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000010", }, "M2PCIe.CLOCKTICKS": { "Box": "M2PCIe", "Category": "M2PCIe UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the M3 uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. 
However, because the M3 is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Clockticks of the mesh to PCI (M2P)", "EvSel": 1, "ExtSel": "", }, "M2PCIe.CMS_CLOCKTICKS": { "Box": "M2PCIe", "Category": "M2PCIe Misc Events", "Desc": "CMS Clockticks", "EvSel": 192, "ExtSel": "", }, "M2PCIe.DISTRESS_ASSERTED": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", }, "M2PCIe.DISTRESS_ASSERTED.DPT_STALL_IV": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.DISTRESS_ASSERTED.PMM_LOCAL": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.DISTRESS_ASSERTED.DPT_STALL_NOCRD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.DISTRESS_ASSERTED.DPT_LOCAL": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.DISTRESS_ASSERTED.HORZ": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, 
"ExtSel": "", "Umask": "b00000010", }, "M2PCIe.DISTRESS_ASSERTED.VERT": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.DISTRESS_ASSERTED.DPT_NONLOCAL": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.DISTRESS_ASSERTED.PMM_NONLOCAL": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.EGRESS_ORDERING": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", }, "M2PCIe.EGRESS_ORDERING.IV_SNOOPGO_DN": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.EGRESS_ORDERING.IV_SNOOPGO_UP": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.HORZ_RING_AD_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the 
Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", }, "M2PCIe.HORZ_RING_AD_IN_USE.RIGHT_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.HORZ_RING_AD_IN_USE.RIGHT_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.HORZ_RING_AD_IN_USE.LEFT_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.HORZ_RING_AD_IN_USE.LEFT_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.HORZ_RING_AKC_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", }, "M2PCIe.HORZ_RING_AKC_IN_USE.LEFT_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.HORZ_RING_AKC_IN_USE.LEFT_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.HORZ_RING_AKC_IN_USE.RIGHT_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.HORZ_RING_AKC_IN_USE.RIGHT_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.HORZ_RING_AK_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", }, "M2PCIe.HORZ_RING_AK_IN_USE.LEFT_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.HORZ_RING_AK_IN_USE.LEFT_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.HORZ_RING_AK_IN_USE.RIGHT_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.HORZ_RING_AK_IN_USE.RIGHT_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.HORZ_RING_BL_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", }, "M2PCIe.HORZ_RING_BL_IN_USE.RIGHT_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.HORZ_RING_BL_IN_USE.LEFT_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.HORZ_RING_BL_IN_USE.LEFT_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.HORZ_RING_BL_IN_USE.RIGHT_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.HORZ_RING_IV_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", }, "M2PCIe.HORZ_RING_IV_IN_USE.RIGHT": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.HORZ_RING_IV_IN_USE.LEFT": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.IIO_CREDITS_ACQUIRED": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. 
Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credit Acquired", "EvSel": 51, "ExtSel": "", }, "M2PCIe.IIO_CREDITS_ACQUIRED.NCB_0": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credit Acquired", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.IIO_CREDITS_ACQUIRED.NCB_1": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credit Acquired", "EvSel": 51, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.IIO_CREDITS_ACQUIRED.NCS_0": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. 
Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credit Acquired", "EvSel": 51, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.IIO_CREDITS_ACQUIRED.DRS_0": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credit Acquired", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.IIO_CREDITS_ACQUIRED.NCS_1": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credit Acquired", "EvSel": 51, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.IIO_CREDITS_ACQUIRED.DRS_1": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. 
Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credit Acquired", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.IIO_CREDITS_REJECT": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Failed to Acquire a Credit", "EvSel": 52, "ExtSel": "", }, "M2PCIe.IIO_CREDITS_REJECT.DRS": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Failed to Acquire a Credit", "EvSel": 52, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.IIO_CREDITS_REJECT.NCS": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). 
NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Failed to Acquire a Credit", "EvSel": 52, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.IIO_CREDITS_REJECT.NCB": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Failed to Acquire a Credit", "EvSel": 52, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.IIO_CREDITS_USED": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credits in Use", "EvSel": 50, "ExtSel": "", }, "M2PCIe.IIO_CREDITS_USED.DRS_0": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). 
NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credits in Use", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.IIO_CREDITS_USED.NCS_0": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credits in Use", "EvSel": 50, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.IIO_CREDITS_USED.DRS_1": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credits in Use", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.IIO_CREDITS_USED.NCS_1": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). 
NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credits in Use", "EvSel": 50, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.IIO_CREDITS_USED.NCB_0": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credits in Use", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.IIO_CREDITS_USED.NCB_1": { "Box": "M2PCIe", "Category": "M2PCIe IIO_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). 
NCS is used for reads to PCIe (and should be used sparingly).", "Desc": "M2PCIe IIO Credits in Use", "EvSel": 50, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 0", "EvSel": 70, "ExtSel": "", "MaxIncCyc": 3, }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 0", "EvSel": 70, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxxx1", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 0", "EvSel": 70, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxx1x", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 0", "EvSel": 70, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxx1xxxx", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 0", "EvSel": 70, "ExtSel": "", "MaxIncCyc": 3, "Umask": "b1xxxxxxx", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 0", "EvSel": 70, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bx1xxxxxx", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 0", "EvSel": 70, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxx1xxxxx", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", 
"Desc": "Local Dedicated P2P Credit Taken - 0", "EvSel": 70, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxx1xx", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 0", "EvSel": 70, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxx1xxx", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 1", "EvSel": 71, "ExtSel": "", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 1", "EvSel": 71, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 1", "EvSel": 71, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 1", "EvSel": 71, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Dedicated P2P Credit Taken - 1", "EvSel": 71, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_0": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 0", "EvSel": 25, "ExtSel": "", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 0", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS": { "Box": 
"M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 0", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 0", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 0", "EvSel": 25, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 0", "EvSel": 25, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 0", "EvSel": 25, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 0", "EvSel": 25, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 0", "EvSel": 25, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_1": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 1", 
"EvSel": 26, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 1", "EvSel": 26, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 1", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Dedicated Credits Returned - 1", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.LOCAL_P2P_SHAR_RETURNED": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Shared Credits Returned", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 3, }, "M2PCIe.LOCAL_P2P_SHAR_RETURNED.AGENT_0": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Shared Credits Returned", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxxx1", }, "M2PCIe.LOCAL_P2P_SHAR_RETURNED.AGENT_2": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Shared Credits Returned", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxx1xx", }, "M2PCIe.LOCAL_P2P_SHAR_RETURNED.AGENT_1": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local P2P Shared Credits Returned", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxx1x", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_RETURNED": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Returned to credit ring", "EvSel": 68, "ExtSel": "", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3": { "Box": "M2PCIe", "Category": "M2PCIe 
EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Returned to credit ring", "EvSel": 68, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Returned to credit ring", "EvSel": 68, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Returned to credit ring", "EvSel": 68, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Returned to credit ring", "EvSel": 68, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Returned to credit ring", "EvSel": 68, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Returned to credit ring", "EvSel": 68, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 0", "EvSel": 64, "ExtSel": "", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 0", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 0", "EvSel": 64, "ExtSel": 
"", "Umask": "bxxxxxxx1", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 0", "EvSel": 64, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 0", "EvSel": 64, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 0", "EvSel": 64, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 0", "EvSel": 64, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 0", "EvSel": 64, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 0", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 1", "EvSel": 65, "ExtSel": "", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 1", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared 
P2P Credit Taken - 1", "EvSel": 65, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 1", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Local Shared P2P Credit Taken - 1", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 0", "EvSel": 74, "ExtSel": "", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 0", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 0", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 0", "EvSel": 74, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 0", "EvSel": 74, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 0", "EvSel": 74, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB": { "Box": "M2PCIe", "Category": "M2PCIe 
EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 0", "EvSel": 74, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 0", "EvSel": 74, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 0", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 1", "EvSel": 75, "ExtSel": "", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 1", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 1", "EvSel": 75, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 1", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Local Shared P2P Credit - 1", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.MISC_EXTERNAL": { "Box": "M2PCIe", "Category": "M2PCIe External Misc Events (eg. 
From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", }, "M2PCIe.MISC_EXTERNAL.MBE_INST1": { "Box": "M2PCIe", "Category": "M2PCIe External Misc Events (eg. From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", "Umask": "bxxxxxx1x", }, "M2PCIe.MISC_EXTERNAL.MBE_INST0": { "Box": "M2PCIe", "Category": "M2PCIe External Misc Events (eg. From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", "Umask": "bxxxxxxx1", }, "M2PCIe.P2P_CRD_OCCUPANCY": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-1", "Desc": "P2P Credit Occupancy", "EvSel": 20, "ExtSel": "", "MaxIncCyc": 127, }, "M2PCIe.P2P_CRD_OCCUPANCY.ALL": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-1", "Desc": "P2P Credit Occupancy", "EvSel": 20, "ExtSel": "", "MaxIncCyc": 127, "Umask": "bxxx1xxxx", }, "M2PCIe.P2P_CRD_OCCUPANCY.REMOTE_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-1", "Desc": "P2P Credit Occupancy", "EvSel": 20, "ExtSel": "", "MaxIncCyc": 127, "Umask": "bxxxx1xxx", }, "M2PCIe.P2P_CRD_OCCUPANCY.LOCAL_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-1", "Desc": "P2P Credit Occupancy", "EvSel": 20, "ExtSel": "", "MaxIncCyc": 127, "Umask": "bxxxxxxx1", }, "M2PCIe.P2P_CRD_OCCUPANCY.REMOTE_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-1", "Desc": "P2P Credit Occupancy", "EvSel": 20, "ExtSel": "", "MaxIncCyc": 127, "Umask": "bxxxxx1xx", }, "M2PCIe.P2P_CRD_OCCUPANCY.LOCAL_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-1", "Desc": "P2P Credit Occupancy", "EvSel": 20, "ExtSel": "", "MaxIncCyc": 127, "Umask": "bxxxxxx1x", }, 
"M2PCIe.P2P_DED_RECEIVED": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Dedicated Credits Received", "EvSel": 22, "ExtSel": "", }, "M2PCIe.P2P_DED_RECEIVED.LOCAL_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Dedicated Credits Received", "EvSel": 22, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.P2P_DED_RECEIVED.REMOTE_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Dedicated Credits Received", "EvSel": 22, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.P2P_DED_RECEIVED.LOCAL_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Dedicated Credits Received", "EvSel": 22, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.P2P_DED_RECEIVED.ALL": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Dedicated Credits Received", "EvSel": 22, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.P2P_DED_RECEIVED.REMOTE_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Dedicated Credits Received", "EvSel": 22, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.P2P_SHAR_RECEIVED": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Shared Credits Received", "EvSel": 21, "ExtSel": "", }, "M2PCIe.P2P_SHAR_RECEIVED.REMOTE_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Shared Credits Received", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.P2P_SHAR_RECEIVED.LOCAL_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Shared Credits Received", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.P2P_SHAR_RECEIVED.REMOTE_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Shared Credits 
Received", "EvSel": 21, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.P2P_SHAR_RECEIVED.ALL": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Shared Credits Received", "EvSel": 21, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.P2P_SHAR_RECEIVED.LOCAL_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Shared Credits Received", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated P2P Credit Taken - 0", "EvSel": 72, "ExtSel": "", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated P2P Credit Taken - 0", "EvSel": 72, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_DRS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated P2P Credit Taken - 0", "EvSel": 72, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated P2P Credit Taken - 0", "EvSel": 72, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_DRS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated P2P Credit Taken - 0", "EvSel": 72, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated P2P Credit Taken - 0", "EvSel": 72, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated 
P2P Credit Taken - 0", "EvSel": 72, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated P2P Credit Taken - 1", "EvSel": 73, "ExtSel": "", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated P2P Credit Taken - 1", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_DRS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated P2P Credit Taken - 1", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Dedicated P2P Credit Taken - 1", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.REMOTE_P2P_DED_RETURNED": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote P2P Dedicated Credits Returned", "EvSel": 27, "ExtSel": "", }, "M2PCIe.REMOTE_P2P_DED_RETURNED.UPI2_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote P2P Dedicated Credits Returned", "EvSel": 27, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.REMOTE_P2P_DED_RETURNED.UPI1_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote P2P Dedicated Credits Returned", "EvSel": 27, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.REMOTE_P2P_DED_RETURNED.UPI0_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote P2P Dedicated Credits Returned", "EvSel": 27, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.REMOTE_P2P_DED_RETURNED.UPI1_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": 
"Remote P2P Dedicated Credits Returned", "EvSel": 27, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.REMOTE_P2P_DED_RETURNED.UPI0_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote P2P Dedicated Credits Returned", "EvSel": 27, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.REMOTE_P2P_DED_RETURNED.UPI2_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote P2P Dedicated Credits Returned", "EvSel": 27, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.REMOTE_P2P_SHAR_RETURNED": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote P2P Shared Credits Returned", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 3, }, "M2PCIe.REMOTE_P2P_SHAR_RETURNED.AGENT_1": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote P2P Shared Credits Returned", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxx1x", }, "M2PCIe.REMOTE_P2P_SHAR_RETURNED.AGENT_2": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote P2P Shared Credits Returned", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxx1xx", }, "M2PCIe.REMOTE_P2P_SHAR_RETURNED.AGENT_0": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote P2P Shared Credits Returned", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxxx1", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_RETURNED": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Returned to credit ring", "EvSel": 69, "ExtSel": "", "MaxIncCyc": 3, }, "M2PCIe.REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Returned to credit ring", "EvSel": 69, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxx1xx", }, 
"M2PCIe.REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Returned to credit ring", "EvSel": 69, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxxx1", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Returned to credit ring", "EvSel": 69, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxx1x", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Taken - 0", "EvSel": 66, "ExtSel": "", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Taken - 0", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_DRS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Taken - 0", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_DRS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Taken - 0", "EvSel": 66, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Taken - 0", "EvSel": 66, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Taken - 0", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": 
"0-3", "Desc": "Remote Shared P2P Credit Taken - 0", "EvSel": 66, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Taken - 1", "EvSel": 67, "ExtSel": "", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_DRS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Taken - 1", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Taken - 1", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Remote Shared P2P Credit Taken - 1", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 0", "EvSel": 76, "ExtSel": "", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_DRS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 0", "EvSel": 76, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 0", "EvSel": 76, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 0", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_DRS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P 
Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 0", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 0", "EvSel": 76, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 0", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 1", "EvSel": 77, "ExtSel": "", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 1", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_DRS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 1", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCB": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS P2P Credit Events", "Counters": "0-3", "Desc": "Waiting on Remote Shared P2P Credit - 1", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.RING_BOUNCES_HORZ": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", }, "M2PCIe.RING_BOUNCES_HORZ.IV": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", 
"Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.RING_BOUNCES_HORZ.BL": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.RING_BOUNCES_HORZ.AK": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.RING_BOUNCES_HORZ.AD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.RING_BOUNCES_VERT": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, }, "M2PCIe.RING_BOUNCES_VERT.AD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "M2PCIe.RING_BOUNCES_VERT.BL": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "M2PCIe.RING_BOUNCES_VERT.IV": 
{ "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "M2PCIe.RING_BOUNCES_VERT.AKC": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "M2PCIe.RING_BOUNCES_VERT.AK": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "M2PCIe.RING_SINK_STARVED_HORZ": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", }, "M2PCIe.RING_SINK_STARVED_HORZ.AD": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.RING_SINK_STARVED_HORZ.BL": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.RING_SINK_STARVED_HORZ.IV": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.RING_SINK_STARVED_HORZ.AK": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.RING_SINK_STARVED_HORZ.AK_AG1": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING 
Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.RING_SINK_STARVED_VERT": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", }, "M2PCIe.RING_SINK_STARVED_VERT.AD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.RING_SINK_STARVED_VERT.AKC": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.RING_SINK_STARVED_VERT.AK": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.RING_SINK_STARVED_VERT.IV": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.RING_SINK_STARVED_VERT.BL": { "Box": "M2PCIe", "Category": "M2PCIe Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.RING_SRC_THRTL": { "Box": "M2PCIe", "Category": "M2PCIe Horizontal RING Events", "Desc": "Source Throttle", "EvSel": 174, "ExtSel": "", }, "M2PCIe.RxC_CYCLES_NE": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Ingress is not empty.", "Desc": "Ingress (from CMS) Queue Cycles Not Empty", "EvSel": 16, "ExtSel": "", }, "M2PCIe.RxC_CYCLES_NE.ALL": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Ingress is not empty.", "Desc": "Ingress (from CMS) Queue Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.RxC_CYCLES_NE.CHA_NCB": { "Box": "M2PCIe", 
"Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Ingress is not empty.", "Desc": "Ingress (from CMS) Queue Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.RxC_CYCLES_NE.IIO_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Ingress is not empty.", "Desc": "Ingress (from CMS) Queue Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.RxC_CYCLES_NE.CHA_IDI": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Ingress is not empty.", "Desc": "Ingress (from CMS) Queue Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.RxC_CYCLES_NE.UPI_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Ingress is not empty.", "Desc": "Ingress (from CMS) Queue Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.RxC_CYCLES_NE.CHA_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Ingress is not empty.", "Desc": "Ingress (from CMS) Queue Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.RxC_CYCLES_NE.IIO_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Ingress is not empty.", "Desc": "Ingress (from CMS) Queue Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.RxC_CYCLES_NE.UPI_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Ingress is not empty.", "Desc": "Ingress (from CMS) Queue Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.RxC_INSERTS": { 
"Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.", "Desc": "Ingress (from CMS) Queue Inserts", "EvSel": 17, "ExtSel": "", }, "M2PCIe.RxC_INSERTS.UPI_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.", "Desc": "Ingress (from CMS) Queue Inserts", "EvSel": 17, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.RxC_INSERTS.CHA_IDI": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.", "Desc": "Ingress (from CMS) Queue Inserts", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.RxC_INSERTS.IIO_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.", "Desc": "Ingress (from CMS) Queue Inserts", "EvSel": 17, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.RxC_INSERTS.ALL": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of entries inserted into the M2PCIe Ingress Queue. 
This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.", "Desc": "Ingress (from CMS) Queue Inserts", "EvSel": 17, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.RxC_INSERTS.CHA_NCB": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.", "Desc": "Ingress (from CMS) Queue Inserts", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.RxC_INSERTS.UPI_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.", "Desc": "Ingress (from CMS) Queue Inserts", "EvSel": 17, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.RxC_INSERTS.IIO_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.", "Desc": "Ingress (from CMS) Queue Inserts", "EvSel": 17, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.RxC_INSERTS.CHA_NCS": { "Box": "M2PCIe", "Category": "M2PCIe INGRESS Events", "Counters": "0-3", "Defn": "Counts the number of entries inserted into the M2PCIe Ingress Queue. 
This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.", "Desc": "Ingress (from CMS) Queue Inserts", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.RxR_BUSY_STARVED": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", }, "M2PCIe.RxR_BUSY_STARVED.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M2PCIe.RxR_BUSY_STARVED.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M2PCIe.RxR_BUSY_STARVED.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M2PCIe.RxR_BUSY_STARVED.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M2PCIe.RxR_BUSY_STARVED.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. 
Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M2PCIe.RxR_BUSY_STARVED.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M2PCIe.RxR_BYPASS": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "M2PCIe.RxR_BYPASS.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M2PCIe.RxR_BYPASS.AKC_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M2PCIe.RxR_BYPASS.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M2PCIe.RxR_BYPASS.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M2PCIe.RxR_BYPASS.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M2PCIe.RxR_BYPASS.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M2PCIe.RxR_BYPASS.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M2PCIe.RxR_BYPASS.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M2PCIe.RxR_BYPASS.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M2PCIe.RxR_CRD_STARVED": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", }, "M2PCIe.RxR_CRD_STARVED.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M2PCIe.RxR_CRD_STARVED.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M2PCIe.RxR_CRD_STARVED.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M2PCIe.RxR_CRD_STARVED.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M2PCIe.RxR_CRD_STARVED.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M2PCIe.RxR_CRD_STARVED.IFV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M2PCIe.RxR_CRD_STARVED.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M2PCIe.RxR_CRD_STARVED.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M2PCIe.RxR_CRD_STARVED.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M2PCIe.RxR_CRD_STARVED_1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 228, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", }, "M2PCIe.RxR_INSERTS": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "M2PCIe.RxR_INSERTS.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M2PCIe.RxR_INSERTS.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M2PCIe.RxR_INSERTS.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M2PCIe.RxR_INSERTS.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M2PCIe.RxR_INSERTS.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M2PCIe.RxR_INSERTS.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M2PCIe.RxR_INSERTS.AKC_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M2PCIe.RxR_INSERTS.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M2PCIe.RxR_INSERTS.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M2PCIe.RxR_OCCUPANCY": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", }, "M2PCIe.RxR_OCCUPANCY.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00100000", }, "M2PCIe.RxR_OCCUPANCY.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M2PCIe.RxR_OCCUPANCY.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M2PCIe.RxR_OCCUPANCY.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M2PCIe.RxR_OCCUPANCY.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M2PCIe.RxR_OCCUPANCY.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M2PCIe.RxR_OCCUPANCY.AKC_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M2PCIe.RxR_OCCUPANCY.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M2PCIe.RxR_OCCUPANCY.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7": { "Box": "M2PCIe", "Category": 
"M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", }, 
"M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", 
"Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer 
is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG1": { "Box": "M2PCIe", 
"Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": 
"b1xxxxxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled 
waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": 
"Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", 
"Notes": "Extension not used by ICX.", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M2PCIe.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8": { "Box": "M2PCIe", "Category": "M2PCIe CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M2PCIe.TxC_CREDITS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, }, "M2PCIe.TxC_CREDITS.PMM": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxx1x", }, "M2PCIe.TxC_CREDITS.PRQ": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "M2PCIe.TxC_CYCLES_FULL": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Egress is full. 
This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.", "Desc": "Egress (to CMS) Cycles Full", "EvSel": 37, "ExtSel": "", }, "M2PCIe.TxC_CYCLES_FULL.AD_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.", "Desc": "Egress (to CMS) Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxC_CYCLES_FULL.AD_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.", "Desc": "Egress (to CMS) Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxC_CYCLES_FULL.PMM_BLOCK_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.", "Desc": "Egress (to CMS) Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.TxC_CYCLES_FULL.BL_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.", "Desc": "Egress (to CMS) Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.TxC_CYCLES_FULL.PMM_BLOCK_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Egress is full. 
This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.", "Desc": "Egress (to CMS) Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.TxC_CYCLES_FULL.BL_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.", "Desc": "Egress (to CMS) Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxC_CYCLES_FULL.AK_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.", "Desc": "Egress (to CMS) Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.TxC_CYCLES_FULL.AK_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.", "Desc": "Egress (to CMS) Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxC_CYCLES_NE": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.", "Desc": "Egress (to CMS) Cycles Not Empty", "EvSel": 35, "ExtSel": "", }, "M2PCIe.TxC_CYCLES_NE.PMM_DISTRESS_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the M2PCIe Egress is not empty. 
This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.", "Desc": "Egress (to CMS) Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.TxC_CYCLES_NE.AK_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.", "Desc": "Egress (to CMS) Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.TxC_CYCLES_NE.PMM_DISTRESS_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.", "Desc": "Egress (to CMS) Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.TxC_CYCLES_NE.AK_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple egress buffers can be tracked at a given time using multiple counters.", "Desc": "Egress (to CMS) Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxC_CYCLES_NE.AD_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.", "Desc": "Egress (to CMS) Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxC_CYCLES_NE.AD_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.", "Desc": "Egress (to CMS) Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxC_CYCLES_NE.BL_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple egress buffers can be tracked at a given time using multiple counters.", "Desc": "Egress (to CMS) Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.TxC_CYCLES_NE.BL_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.", "Desc": "Egress (to CMS) Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxC_INSERTS": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.", "Desc": "Egress (to CMS) Ingress", "EvSel": 36, "ExtSel": "", }, "M2PCIe.TxC_INSERTS.AD_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.", "Desc": "Egress (to CMS) Ingress", "EvSel": 36, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxC_INSERTS.AD_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. 
This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.", "Desc": "Egress (to CMS) Ingress", "EvSel": 36, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxC_INSERTS.BL_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.", "Desc": "Egress (to CMS) Ingress", "EvSel": 36, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxC_INSERTS.BL_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.", "Desc": "Egress (to CMS) Ingress", "EvSel": 36, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.TxC_INSERTS.AK_CRD_1": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.", "Desc": "Egress (to CMS) Ingress", "EvSel": 36, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M2PCIe.TxC_INSERTS.AK_CRD_0": { "Box": "M2PCIe", "Category": "M2PCIe EGRESS Events", "Counters": "0-3", "Defn": "Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. 
This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.", "Desc": "Egress (to CMS) Ingress", "EvSel": 36, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.TxR_HORZ_ADS_USED": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", }, "M2PCIe.TxR_HORZ_ADS_USED.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00010001", }, "M2PCIe.TxR_HORZ_ADS_USED.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00000100", }, "M2PCIe.TxR_HORZ_ADS_USED.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b01000000", }, "M2PCIe.TxR_HORZ_ADS_USED.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.TxR_HORZ_ADS_USED.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": 
"b00010000", }, "M2PCIe.TxR_HORZ_ADS_USED.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b01000100", }, "M2PCIe.TxR_HORZ_BYPASS": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", }, "M2PCIe.TxR_HORZ_BYPASS.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b01000000", }, "M2PCIe.TxR_HORZ_BYPASS.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000010", }, "M2PCIe.TxR_HORZ_BYPASS.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00010001", }, "M2PCIe.TxR_HORZ_BYPASS.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b01000100", }, "M2PCIe.TxR_HORZ_BYPASS.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal 
Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00010000", }, "M2PCIe.TxR_HORZ_BYPASS.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000100", }, "M2PCIe.TxR_HORZ_BYPASS.AKC_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b10000000", }, "M2PCIe.TxR_HORZ_BYPASS.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00001000", }, "M2PCIe.TxR_HORZ_BYPASS.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.TxR_HORZ_CYCLES_FULL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", }, "M2PCIe.TxR_HORZ_CYCLES_FULL.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.TxR_HORZ_CYCLES_FULL.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00001000", }, "M2PCIe.TxR_HORZ_CYCLES_FULL.AKC_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b10000000", }, "M2PCIe.TxR_HORZ_CYCLES_FULL.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000100", }, "M2PCIe.TxR_HORZ_CYCLES_FULL.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b01000100", }, "M2PCIe.TxR_HORZ_CYCLES_FULL.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00010000", }, "M2PCIe.TxR_HORZ_CYCLES_FULL.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b01000000", }, "M2PCIe.TxR_HORZ_CYCLES_FULL.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00010001", }, "M2PCIe.TxR_HORZ_CYCLES_FULL.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000010", }, "M2PCIe.TxR_HORZ_CYCLES_NE": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", }, "M2PCIe.TxR_HORZ_CYCLES_NE.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.TxR_HORZ_CYCLES_NE.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00001000", }, "M2PCIe.TxR_HORZ_CYCLES_NE.AKC_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b10000000", }, "M2PCIe.TxR_HORZ_CYCLES_NE.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000100", }, "M2PCIe.TxR_HORZ_CYCLES_NE.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00010000", }, "M2PCIe.TxR_HORZ_CYCLES_NE.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b01000100", }, "M2PCIe.TxR_HORZ_CYCLES_NE.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000010", }, "M2PCIe.TxR_HORZ_CYCLES_NE.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00010001", }, "M2PCIe.TxR_HORZ_CYCLES_NE.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b01000000", }, "M2PCIe.TxR_HORZ_INSERTS": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", }, "M2PCIe.TxR_HORZ_INSERTS.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.TxR_HORZ_INSERTS.AKC_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b10000000", }, "M2PCIe.TxR_HORZ_INSERTS.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00001000", }, "M2PCIe.TxR_HORZ_INSERTS.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", 
"Umask": "b00000100", }, "M2PCIe.TxR_HORZ_INSERTS.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b01000100", }, "M2PCIe.TxR_HORZ_INSERTS.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00010000", }, "M2PCIe.TxR_HORZ_INSERTS.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b01000000", }, "M2PCIe.TxR_HORZ_INSERTS.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00010001", }, "M2PCIe.TxR_HORZ_INSERTS.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000010", }, "M2PCIe.TxR_HORZ_NACK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress 
packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", }, "M2PCIe.TxR_HORZ_NACK.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b01000100", }, "M2PCIe.TxR_HORZ_NACK.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00010000", }, "M2PCIe.TxR_HORZ_NACK.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b01000000", }, "M2PCIe.TxR_HORZ_NACK.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00010001", }, "M2PCIe.TxR_HORZ_NACK.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000010", }, "M2PCIe.TxR_HORZ_NACK.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.TxR_HORZ_NACK.AKC_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": 
"b10000000", }, "M2PCIe.TxR_HORZ_NACK.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000100", }, "M2PCIe.TxR_HORZ_NACK.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00001000", }, "M2PCIe.TxR_HORZ_OCCUPANCY": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", }, "M2PCIe.TxR_HORZ_OCCUPANCY.AD_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00010000", }, "M2PCIe.TxR_HORZ_OCCUPANCY.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b01000100", }, "M2PCIe.TxR_HORZ_OCCUPANCY.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000010", }, 
"M2PCIe.TxR_HORZ_OCCUPANCY.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00010001", }, "M2PCIe.TxR_HORZ_OCCUPANCY.BL_CRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b01000000", }, "M2PCIe.TxR_HORZ_OCCUPANCY.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.TxR_HORZ_OCCUPANCY.AKC_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b10000000", }, "M2PCIe.TxR_HORZ_OCCUPANCY.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00001000", }, "M2PCIe.TxR_HORZ_OCCUPANCY.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the 
Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000100", }, "M2PCIe.TxR_HORZ_STARVED": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", }, "M2PCIe.TxR_HORZ_STARVED.AK": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000010", }, "M2PCIe.TxR_HORZ_STARVED.AD_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.TxR_HORZ_STARVED.BL_ALL": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000100", }, "M2PCIe.TxR_HORZ_STARVED.AKC_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b10000000", }, "M2PCIe.TxR_HORZ_STARVED.IV": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00001000", }, "M2PCIe.TxR_HORZ_STARVED.BL_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000100", }, "M2PCIe.TxR_HORZ_STARVED.AD_UNCRD": { "Box": "M2PCIe", "Category": "M2PCIe CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000001", }, "M2PCIe.TxR_VERT_ADS_USED": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", }, "M2PCIe.TxR_VERT_ADS_USED.BL_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.TxR_VERT_ADS_USED.AD_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_ADS_USED.BL_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxR_VERT_ADS_USED.AD_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxR_VERT_BYPASS": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", }, "M2PCIe.TxR_VERT_BYPASS.BL_AG0": { 
"Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxR_VERT_BYPASS.AD_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_BYPASS.AK_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.TxR_VERT_BYPASS.AD_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxR_VERT_BYPASS.AK_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_BYPASS.IV_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.TxR_VERT_BYPASS.BL_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bx1xxxxxx", }, 
"M2PCIe.TxR_VERT_BYPASS_1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", }, "M2PCIe.TxR_VERT_BYPASS_1.AKC_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_BYPASS_1.AKC_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_CYCLES_FULL0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", }, "M2PCIe.TxR_VERT_CYCLES_FULL0.AK_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.TxR_VERT_CYCLES_FULL0.AD_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_CYCLES_FULL0.IV_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.TxR_VERT_CYCLES_FULL0.BL_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxR_VERT_CYCLES_FULL0.BL_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.TxR_VERT_CYCLES_FULL0.AD_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxR_VERT_CYCLES_FULL0.AK_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_CYCLES_FULL1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", }, "M2PCIe.TxR_VERT_CYCLES_FULL1.AKC_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_CYCLES_FULL1.AKC_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_CYCLES_NE0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", }, "M2PCIe.TxR_VERT_CYCLES_NE0.BL_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.TxR_VERT_CYCLES_NE0.AD_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxR_VERT_CYCLES_NE0.AK_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_CYCLES_NE0.AK_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.TxR_VERT_CYCLES_NE0.AD_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_CYCLES_NE0.IV_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.TxR_VERT_CYCLES_NE0.BL_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxR_VERT_CYCLES_NE1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", }, "M2PCIe.TxR_VERT_CYCLES_NE1.AKC_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_CYCLES_NE1.AKC_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_INSERTS0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", }, "M2PCIe.TxR_VERT_INSERTS0.IV_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.TxR_VERT_INSERTS0.BL_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxR_VERT_INSERTS0.AD_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_INSERTS0.AK_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.TxR_VERT_INSERTS0.AD_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxR_VERT_INSERTS0.AK_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_INSERTS0.BL_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.TxR_VERT_INSERTS1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", }, "M2PCIe.TxR_VERT_INSERTS1.AKC_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_INSERTS1.AKC_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_NACK0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", }, "M2PCIe.TxR_VERT_NACK0.IV_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.TxR_VERT_NACK0.BL_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxR_VERT_NACK0.AK_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.TxR_VERT_NACK0.AD_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_NACK0.AD_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxR_VERT_NACK0.AK_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": 
"bxxxxxx1x", }, "M2PCIe.TxR_VERT_NACK0.BL_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.TxR_VERT_NACK1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", }, "M2PCIe.TxR_VERT_NACK1.AKC_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_NACK1.AKC_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_OCCUPANCY0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", }, "M2PCIe.TxR_VERT_OCCUPANCY0.BL_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxR_VERT_OCCUPANCY0.IV_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": 
"CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.TxR_VERT_OCCUPANCY0.AK_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.TxR_VERT_OCCUPANCY0.AD_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_OCCUPANCY0.AK_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_OCCUPANCY0.AD_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxR_VERT_OCCUPANCY0.BL_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.TxR_VERT_OCCUPANCY1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the 
Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", }, "M2PCIe.TxR_VERT_OCCUPANCY1.AKC_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_OCCUPANCY1.AKC_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_STARVED0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", }, "M2PCIe.TxR_VERT_STARVED0.AD_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_STARVED0.AK_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2PCIe.TxR_VERT_STARVED0.BL_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.TxR_VERT_STARVED0.IV_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.TxR_VERT_STARVED0.BL_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2PCIe.TxR_VERT_STARVED0.AD_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2PCIe.TxR_VERT_STARVED0.AK_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_STARVED1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", }, "M2PCIe.TxR_VERT_STARVED1.AKC_AG0": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.TxR_VERT_STARVED1.AKC_AG1": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.TxR_VERT_STARVED1.TGC": { "Box": "M2PCIe", "Category": "M2PCIe CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.VERT_RING_AD_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", }, "M2PCIe.VERT_RING_AD_IN_USE.UP_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.VERT_RING_AD_IN_USE.DN_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. 
We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.VERT_RING_AD_IN_USE.DN_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.VERT_RING_AD_IN_USE.UP_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.VERT_RING_AKC_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", }, "M2PCIe.VERT_RING_AKC_IN_USE.UP_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. 
On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.VERT_RING_AKC_IN_USE.DN_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.VERT_RING_AKC_IN_USE.DN_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.VERT_RING_AKC_IN_USE.UP_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.VERT_RING_AK_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", }, "M2PCIe.VERT_RING_AK_IN_USE.DN_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.VERT_RING_AK_IN_USE.UP_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.VERT_RING_AK_IN_USE.UP_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.VERT_RING_AK_IN_USE.DN_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.VERT_RING_BL_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", }, "M2PCIe.VERT_RING_BL_IN_USE.DN_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.VERT_RING_BL_IN_USE.UP_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.VERT_RING_BL_IN_USE.UP_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.VERT_RING_BL_IN_USE.DN_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.VERT_RING_IV_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", }, "M2PCIe.VERT_RING_IV_IN_USE.DN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2PCIe.VERT_RING_IV_IN_USE.UP": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.VERT_RING_TGC_IN_USE": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", }, "M2PCIe.VERT_RING_TGC_IN_USE.DN_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2PCIe.VERT_RING_TGC_IN_USE.UP_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2PCIe.VERT_RING_TGC_IN_USE.UP_ODD": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2PCIe.VERT_RING_TGC_IN_USE.DN_EVEN": { "Box": "M2PCIe", "Category": "M2PCIe Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxx1xx", }, # UBOX: "UBOX.EVENT_MSG": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", }, "UBOX.EVENT_MSG.VLW_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.EVENT_MSG.INT_PRIO": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UBOX.EVENT_MSG.MSI_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UBOX.EVENT_MSG.DOORBELL_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UBOX.EVENT_MSG.IPI_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UBOX.LOCK_CYCLES": { "Box": "UBOX", "Category": "UBOX LOCK Events", "Counters": "0-1", "Defn": "Number of times an IDI Lock/SplitLock sequence was started", "Desc": "IDI Lock/SplitLock Cycles", "EvSel": 68, "ExtSel": "", }, "UBOX.M2U_MISC1": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 77, "ExtSel": "", }, "UBOX.M2U_MISC1.RxC_CYCLES_NE_UPI_NCS": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 77, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UBOX.M2U_MISC1.RxC_CYCLES_NE_CBO_NCS": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UBOX.M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 77, 
"ExtSel": "", "Umask": "bxx1xxxxx", }, "UBOX.M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 77, "ExtSel": "", "Umask": "b1xxxxxxx", }, "UBOX.M2U_MISC1.RxC_CYCLES_NE_CBO_NCB": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 77, "ExtSel": "", "Umask": "bx1xxxxxx", }, "UBOX.M2U_MISC1.RxC_CYCLES_NE_UPI_NCB": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UBOX.M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 77, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UBOX.M2U_MISC2": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 78, "ExtSel": "", }, "UBOX.M2U_MISC2.RxC_CYCLES_EMPTY_BL": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 78, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UBOX.M2U_MISC2.TxC_CYCLES_FULL_BL": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 78, "ExtSel": "", "Umask": "b1xxxxxxx", }, "UBOX.M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 78, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UBOX.M2U_MISC2.TxC_CYCLES_EMPTY_AK": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 78, "ExtSel": "", "Umask": "bxx1xxxxx", }, "UBOX.M2U_MISC2.TxC_CYCLES_EMPTY_BL": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 78, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UBOX.M2U_MISC2.RxC_CYCLES_FULL_BL": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 78, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 78, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UBOX.M2U_MISC2.TxC_CYCLES_EMPTY_AKC": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 78, "ExtSel": "", "Umask": "bx1xxxxxx", }, "UBOX.M2U_MISC3": { "Box": 
"UBOX", "Category": "UBOX M2U Events", "EvSel": 79, "ExtSel": "", }, "UBOX.M2U_MISC3.TxC_CYCLES_FULL_AK": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 79, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.M2U_MISC3.TxC_CYCLES_FULL_AKC": { "Box": "UBOX", "Category": "UBOX M2U Events", "EvSel": 79, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UBOX.PHOLD_CYCLES": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", }, "UBOX.PHOLD_CYCLES.ASSERT_TO_ACK": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.RACU_DRNG": { "Box": "UBOX", "Category": "UBOX RACU Events", "EvSel": 76, "ExtSel": "", }, "UBOX.RACU_DRNG.PFTCH_BUF_EMPTY": { "Box": "UBOX", "Category": "UBOX RACU Events", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UBOX.RACU_DRNG.RDSEED": { "Box": "UBOX", "Category": "UBOX RACU Events", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UBOX.RACU_DRNG.RDRAND": { "Box": "UBOX", "Category": "UBOX RACU Events", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.RACU_REQUESTS": { "Box": "UBOX", "Category": "UBOX RACU Events", "Counters": "0-1", "Defn": "Number outstanding register requests within message channel tracker", "Desc": "RACU Request", "EvSel": 70, "ExtSel": "", "Notes": "RACU = Register Access Control Unit", }, # M3UPI: "M3UPI.AG0_AD_CRD_ACQUIRED0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "M3UPI.AG0_AD_CRD_ACQUIRED0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired 
in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "M3UPI.AG0_AD_CRD_ACQUIRED0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "M3UPI.AG0_AD_CRD_ACQUIRED0.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "b1xxxxxxx", }, "M3UPI.AG0_AD_CRD_ACQUIRED0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "M3UPI.AG0_AD_CRD_ACQUIRED0.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bx1xxxxxx", }, "M3UPI.AG0_AD_CRD_ACQUIRED0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", 
"Umask": "bxxxxxx1x", }, "M3UPI.AG0_AD_CRD_ACQUIRED0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "M3UPI.AG0_AD_CRD_ACQUIRED0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "M3UPI.AG0_AD_CRD_ACQUIRED1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", }, "M3UPI.AG0_AD_CRD_ACQUIRED1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M3UPI.AG0_AD_CRD_ACQUIRED1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. 
Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M3UPI.AG0_AD_CRD_ACQUIRED1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 129, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M3UPI.AG0_AD_CRD_OCCUPANCY0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "M3UPI.AG0_AD_CRD_OCCUPANCY0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000010", }, "M3UPI.AG0_AD_CRD_OCCUPANCY0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000001", }, "M3UPI.AG0_AD_CRD_OCCUPANCY0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000100", }, "M3UPI.AG0_AD_CRD_OCCUPANCY0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS 
Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00010000", }, "M3UPI.AG0_AD_CRD_OCCUPANCY0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00100000", }, "M3UPI.AG0_AD_CRD_OCCUPANCY0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00001000", }, "M3UPI.AG0_AD_CRD_OCCUPANCY0.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b10000000", }, "M3UPI.AG0_AD_CRD_OCCUPANCY0.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b01000000", }, "M3UPI.AG0_AD_CRD_OCCUPANCY1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the 
SUM of all selected. Extension not used by ICX.", }, "M3UPI.AG0_AD_CRD_OCCUPANCY1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000001", }, "M3UPI.AG0_AD_CRD_OCCUPANCY1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000100", }, "M3UPI.AG0_AD_CRD_OCCUPANCY1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 131, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", "Umask": "b00000010", }, "M3UPI.AG0_BL_CRD_ACQUIRED0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", }, "M3UPI.AG0_BL_CRD_ACQUIRED0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.AG0_BL_CRD_ACQUIRED0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.AG0_BL_CRD_ACQUIRED0.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.AG0_BL_CRD_ACQUIRED0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.AG0_BL_CRD_ACQUIRED0.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.AG0_BL_CRD_ACQUIRED0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", 
"Umask": "bxxxxxx1x", }, "M3UPI.AG0_BL_CRD_ACQUIRED0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.AG0_BL_CRD_ACQUIRED0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.AG0_BL_CRD_ACQUIRED1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M3UPI.AG0_BL_CRD_ACQUIRED1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M3UPI.AG0_BL_CRD_ACQUIRED1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M3UPI.AG0_BL_CRD_ACQUIRED1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 137, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M3UPI.AG0_BL_CRD_OCCUPANCY0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL 
credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", }, "M3UPI.AG0_BL_CRD_OCCUPANCY0.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b01000000", }, "M3UPI.AG0_BL_CRD_OCCUPANCY0.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b10000000", }, "M3UPI.AG0_BL_CRD_OCCUPANCY0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00001000", }, "M3UPI.AG0_BL_CRD_OCCUPANCY0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00100000", }, "M3UPI.AG0_BL_CRD_OCCUPANCY0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00010000", }, "M3UPI.AG0_BL_CRD_OCCUPANCY0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000100", }, "M3UPI.AG0_BL_CRD_OCCUPANCY0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, 
per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.AG0_BL_CRD_OCCUPANCY0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.AG0_BL_CRD_OCCUPANCY1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M3UPI.AG0_BL_CRD_OCCUPANCY1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000010", }, "M3UPI.AG0_BL_CRD_OCCUPANCY1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000100", }, "M3UPI.AG0_BL_CRD_OCCUPANCY1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 139, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000001", }, "M3UPI.AG1_AD_CRD_ACQUIRED0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, 
"M3UPI.AG1_AD_CRD_ACQUIRED0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "M3UPI.AG1_AD_CRD_ACQUIRED0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "M3UPI.AG1_AD_CRD_ACQUIRED0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "M3UPI.AG1_AD_CRD_ACQUIRED0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "M3UPI.AG1_AD_CRD_ACQUIRED0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "M3UPI.AG1_AD_CRD_ACQUIRED0.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per 
transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "b1xxxxxxx", }, "M3UPI.AG1_AD_CRD_ACQUIRED0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "M3UPI.AG1_AD_CRD_ACQUIRED0.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bx1xxxxxx", }, "M3UPI.AG1_AD_CRD_ACQUIRED1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", }, "M3UPI.AG1_AD_CRD_ACQUIRED1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. 
Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M3UPI.AG1_AD_CRD_ACQUIRED1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M3UPI.AG1_AD_CRD_ACQUIRED1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 133, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected. Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M3UPI.AG1_AD_CRD_OCCUPANCY0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "M3UPI.AG1_AD_CRD_OCCUPANCY0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00010000", }, "M3UPI.AG1_AD_CRD_OCCUPANCY0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00100000", }, "M3UPI.AG1_AD_CRD_OCCUPANCY0.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit 
Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b10000000", }, "M3UPI.AG1_AD_CRD_OCCUPANCY0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00001000", }, "M3UPI.AG1_AD_CRD_OCCUPANCY0.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b01000000", }, "M3UPI.AG1_AD_CRD_OCCUPANCY0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000010", }, "M3UPI.AG1_AD_CRD_OCCUPANCY0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "b00000001", }, "M3UPI.AG1_AD_CRD_OCCUPANCY0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If 
multiple masks are selected, will count the SUM of all selected", "Umask": "b00000100", }, "M3UPI.AG1_AD_CRD_OCCUPANCY1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", }, "M3UPI.AG1_AD_CRD_OCCUPANCY1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000001", }, "M3UPI.AG1_AD_CRD_OCCUPANCY1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. Extension not used by ICX.", "Umask": "b00000010", }, "M3UPI.AG1_AD_CRD_OCCUPANCY1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 135, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected. 
Extension not used by ICX.", "Umask": "b00000100", }, "M3UPI.AG1_BL_CRD_ACQUIRED0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", }, "M3UPI.AG1_BL_CRD_ACQUIRED0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.AG1_BL_CRD_ACQUIRED0.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.AG1_BL_CRD_ACQUIRED0.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.AG1_BL_CRD_ACQUIRED0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.AG1_BL_CRD_ACQUIRED0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.AG1_BL_CRD_ACQUIRED0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", 
"Umask": "bxxxxx1xx", }, "M3UPI.AG1_BL_CRD_ACQUIRED0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.AG1_BL_CRD_ACQUIRED0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.AG1_BL_CRD_ACQUIRED1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M3UPI.AG1_BL_CRD_ACQUIRED1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M3UPI.AG1_BL_CRD_ACQUIRED1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M3UPI.AG1_BL_CRD_ACQUIRED1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 141, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M3UPI.AG1_BL_CRD_OCCUPANCY0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL 
credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", }, "M3UPI.AG1_BL_CRD_OCCUPANCY0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.AG1_BL_CRD_OCCUPANCY0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.AG1_BL_CRD_OCCUPANCY0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00000100", }, "M3UPI.AG1_BL_CRD_OCCUPANCY0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00100000", }, "M3UPI.AG1_BL_CRD_OCCUPANCY0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00010000", }, "M3UPI.AG1_BL_CRD_OCCUPANCY0.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b01000000", }, "M3UPI.AG1_BL_CRD_OCCUPANCY0.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, 
per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b10000000", }, "M3UPI.AG1_BL_CRD_OCCUPANCY0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "b00001000", }, "M3UPI.AG1_BL_CRD_OCCUPANCY1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M3UPI.AG1_BL_CRD_OCCUPANCY1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000001", }, "M3UPI.AG1_BL_CRD_OCCUPANCY1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000010", }, "M3UPI.AG1_BL_CRD_OCCUPANCY1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 143, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "b00000100", }, "M3UPI.CHA_AD_CREDITS_EMPTY": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", }, "M3UPI.CHA_AD_CREDITS_EMPTY.WB": { "Box": "M3UPI", "Category": "M3UPI EGRESS 
Credit Events", "Counters": "0-3", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.CHA_AD_CREDITS_EMPTY.SNP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.CHA_AD_CREDITS_EMPTY.REQ": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.CHA_AD_CREDITS_EMPTY.VNA": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.CLOCKTICKS": { "Box": "M3UPI", "Category": "M3UPI UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the M3 uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. 
However, because the M3 is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Clockticks of the mesh to UPI (M3UPI)", "EvSel": 1, "ExtSel": "", }, "M3UPI.CMS_CLOCKTICKS": { "Box": "M3UPI", "Category": "M3UPI Misc Events", "Desc": "CMS Clockticks", "EvSel": 192, "ExtSel": "", }, "M3UPI.D2C_SENT": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Count cases BL sends direct to core", "Desc": "D2C Sent", "EvSel": 43, "ExtSel": "", }, "M3UPI.D2U_SENT": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Cases where SMI3 sends D2U command", "Desc": "D2U Sent", "EvSel": 42, "ExtSel": "", "Notes": "NOT required anymore", }, "M3UPI.DISTRESS_ASSERTED": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", }, "M3UPI.DISTRESS_ASSERTED.DPT_NONLOCAL": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.DISTRESS_ASSERTED.VERT": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.DISTRESS_ASSERTED.PMM_NONLOCAL": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.DISTRESS_ASSERTED.HORZ": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles 
either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.DISTRESS_ASSERTED.PMM_LOCAL": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.DISTRESS_ASSERTED.DPT_LOCAL": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.DISTRESS_ASSERTED.DPT_STALL_NOCRD": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.DISTRESS_ASSERTED.DPT_STALL_IV": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted.", "Desc": "Distress signal asserted", "EvSel": 175, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.EGRESS_ORDERING": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", }, "M3UPI.EGRESS_ORDERING.IV_SNOOPGO_UP": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.EGRESS_ORDERING.IV_SNOOPGO_DN": { "Box": "M3UPI", "Category": "M3UPI 
Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.HORZ_RING_AD_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", }, "M3UPI.HORZ_RING_AD_IN_USE.LEFT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.HORZ_RING_AD_IN_USE.LEFT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.HORZ_RING_AD_IN_USE.RIGHT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.HORZ_RING_AD_IN_USE.RIGHT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 182, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.HORZ_RING_AKC_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", }, "M3UPI.HORZ_RING_AKC_IN_USE.RIGHT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.HORZ_RING_AKC_IN_USE.LEFT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.HORZ_RING_AKC_IN_USE.LEFT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.HORZ_RING_AKC_IN_USE.RIGHT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.HORZ_RING_AK_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", }, "M3UPI.HORZ_RING_AK_IN_USE.RIGHT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.HORZ_RING_AK_IN_USE.LEFT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.HORZ_RING_AK_IN_USE.LEFT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.HORZ_RING_AK_IN_USE.RIGHT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.HORZ_RING_BL_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", }, "M3UPI.HORZ_RING_BL_IN_USE.RIGHT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.HORZ_RING_BL_IN_USE.LEFT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.HORZ_RING_BL_IN_USE.LEFT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.HORZ_RING_BL_IN_USE.RIGHT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 184, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.HORZ_RING_IV_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", }, "M3UPI.HORZ_RING_IV_IN_USE.LEFT": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.HORZ_RING_IV_IN_USE.RIGHT": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. 
To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.M2_BL_CREDITS_EMPTY": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", }, "M3UPI.M2_BL_CREDITS_EMPTY.IIO4_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.M2_BL_CREDITS_EMPTY.IIO5_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.M2_BL_CREDITS_EMPTY.IIO3_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.M2_BL_CREDITS_EMPTY.NCS_SEL": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.M2_BL_CREDITS_EMPTY.IIO1_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.M2_BL_CREDITS_EMPTY.NCS": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.M2_BL_CREDITS_EMPTY.UBOX_NCB": { "Box": "M3UPI", 
"Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.M2_BL_CREDITS_EMPTY.IIO2_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.MISC_EXTERNAL": { "Box": "M3UPI", "Category": "M3UPI External Misc Events (eg. From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", }, "M3UPI.MISC_EXTERNAL.MBE_INST0": { "Box": "M3UPI", "Category": "M3UPI External Misc Events (eg. From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", "Umask": "bxxxxxxx1", }, "M3UPI.MISC_EXTERNAL.MBE_INST1": { "Box": "M3UPI", "Category": "M3UPI External Misc Events (eg. 
From MS2IDI)", "Desc": "Miscellaneous Events (mostly from MS2IDI)", "EvSel": 230, "ExtSel": "", "Notes": "ONLY relevant to the CHA's CMS", "Umask": "bxxxxxx1x", }, "M3UPI.MULTI_SLOT_RCVD": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 3, "Notes": "subevents added to 5b?", }, "M3UPI.MULTI_SLOT_RCVD.AK_SLOT2": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 3, "Notes": "subevents added to 5b?", "Umask": "bxx1xxxxx", }, "M3UPI.MULTI_SLOT_RCVD.AD_SLOT0": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 3, "Notes": "subevents added to 5b?", "Umask": "bxxxxxxx1", }, "M3UPI.MULTI_SLOT_RCVD.BL_SLOT0": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 3, "Notes": "subevents added to 5b?", "Umask": "bxxxx1xxx", }, "M3UPI.MULTI_SLOT_RCVD.AD_SLOT2": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 3, "Notes": "subevents added to 5b?", "Umask": "bxxxxx1xx", }, "M3UPI.MULTI_SLOT_RCVD.AK_SLOT0": { "Box": "M3UPI", 
"Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 3, "Notes": "subevents added to 5b?", "Umask": "bxxx1xxxx", }, "M3UPI.MULTI_SLOT_RCVD.AD_SLOT1": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 3, "Notes": "subevents added to 5b?", "Umask": "bxxxxxx1x", }, "M3UPI.RING_BOUNCES_HORZ": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", }, "M3UPI.RING_BOUNCES_HORZ.AK": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RING_BOUNCES_HORZ.IV": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RING_BOUNCES_HORZ.BL": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RING_BOUNCES_HORZ.AD": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Number of cycles incoming 
messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RING_BOUNCES_VERT": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, }, "M3UPI.RING_BOUNCES_VERT.AD": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "M3UPI.RING_BOUNCES_VERT.AK": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "M3UPI.RING_BOUNCES_VERT.AKC": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "M3UPI.RING_BOUNCES_VERT.IV": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "M3UPI.RING_BOUNCES_VERT.BL": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 170, 
"ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "M3UPI.RING_SINK_STARVED_HORZ": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", }, "M3UPI.RING_SINK_STARVED_HORZ.AD": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RING_SINK_STARVED_HORZ.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RING_SINK_STARVED_HORZ.AK": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RING_SINK_STARVED_HORZ.IV": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RING_SINK_STARVED_HORZ.BL": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RING_SINK_STARVED_VERT": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", }, "M3UPI.RING_SINK_STARVED_VERT.AKC": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RING_SINK_STARVED_VERT.AK": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RING_SINK_STARVED_VERT.IV": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RING_SINK_STARVED_VERT.BL": { "Box": 
"M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RING_SINK_STARVED_VERT.AD": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RING_SRC_THRTL": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Source Throttle", "EvSel": 174, "ExtSel": "", }, "M3UPI.RxC_ARB_LOST_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", }, "M3UPI.RxC_ARB_LOST_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_LOST_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_LOST_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_LOST_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_LOST_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_LOST_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost 
arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_LOST_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_LOST_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", }, "M3UPI.RxC_ARB_LOST_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_LOST_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_LOST_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_LOST_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_LOST_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_LOST_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_LOST_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI 
INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_MISC": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", }, "M3UPI.RxC_ARB_MISC.ALL_PARALLEL_WIN": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.RxC_ARB_MISC.NO_PROG_AD_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_MISC.NO_PROG_AD_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_MISC.NO_PROG_BL_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_MISC.VN01_PARALLEL_WIN": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_MISC.NO_PROG_BL_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_NOCRD_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": 
"No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", }, "M3UPI.RxC_ARB_NOCRD_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_NOCRD_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_NOCRD_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_NOCRD_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_NOCRD_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_NOCRD_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_NOCRD_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for 
VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_NOCRD_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", }, "M3UPI.RxC_ARB_NOCRD_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_NOCRD_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_NOCRD_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_NOCRD_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_NOCRD_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_NOCRD_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", 
"Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_NOCRD_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_NOREQ_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", }, "M3UPI.RxC_ARB_NOREQ_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_NOREQ_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_NOREQ_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_NOREQ_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_NOREQ_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_NOREQ_VN0.AD_REQ": 
{ "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_NOREQ_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_NOREQ_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", }, "M3UPI.RxC_ARB_NOREQ_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_NOREQ_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_NOREQ_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_NOREQ_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_NOREQ_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration 
Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_NOREQ_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_NOREQ_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_BYPASSED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Counters": "0-2", "Defn": "Number ot times message is bypassed around the Ingress Queue", "Desc": "Ingress Queue Bypasses", "EvSel": 64, "ExtSel": "", }, "M3UPI.RxC_BYPASSED.AD_S1_BL_SLOT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Counters": "0-2", "Defn": "Number ot times message is bypassed around the Ingress Queue", "Desc": "Ingress Queue Bypasses", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_BYPASSED.AD_S0_BL_ARB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Counters": "0-2", "Defn": "Number ot times message is bypassed around the Ingress Queue", "Desc": "Ingress Queue Bypasses", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_BYPASSED.AD_S0_IDLE": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Counters": "0-2", "Defn": "Number ot times message is bypassed around the Ingress Queue", "Desc": "Ingress Queue Bypasses", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_BYPASSED.AD_S2_BL_SLOT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Counters": "0-2", "Defn": "Number ot times message is bypassed around the Ingress Queue", "Desc": "Ingress Queue Bypasses", 
"EvSel": 64, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_CRD_MISC": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 95, "ExtSel": "", }, "M3UPI.RxC_CRD_MISC.ANY_BGF_PATH": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 95, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_CRD_MISC.LT2_FOR_D2K": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 95, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_CRD_MISC.ANY_BGF_FIFO": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 95, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_CRD_MISC.LT1_FOR_D2K": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 95, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_CRD_MISC.VN1_NO_D2K_FOR_ARB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 95, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_CRD_MISC.VN0_NO_D2K_FOR_ARB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 95, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_CRD_OCC": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 96, "ExtSel": "", }, "M3UPI.RxC_CRD_OCC.D2K_CRD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_CRD_OCC.TxQ_CRD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_CRD_OCC.VNA_IN_USE": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxxx1", }, 
"M3UPI.RxC_CRD_OCC.P1P_FIFO": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_CRD_OCC.P1P_TOTAL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_CRD_OCC.FLITS_IN_FIFO": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_CRD_OCC.FLITS_IN_PATH": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_CRD_OCC.CONSUMED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.RxC_CYCLES_NE_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", }, "M3UPI.RxC_CYCLES_NE_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_CYCLES_NE_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_CYCLES_NE_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_CYCLES_NE_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_CYCLES_NE_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_CYCLES_NE_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_CYCLES_NE_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_CYCLES_NE_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. 
This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", }, "M3UPI.RxC_CYCLES_NE_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_CYCLES_NE_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_CYCLES_NE_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_CYCLES_NE_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_CYCLES_NE_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_CYCLES_NE_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_CYCLES_NE_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. 
This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_DATA_FLITS_NOT_SENT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Data flit is ready for transmission but could not be sent", "Desc": "Data Flit Not Sent", "EvSel": 85, "ExtSel": "", }, "M3UPI.RxC_DATA_FLITS_NOT_SENT.NO_BGF": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Data flit is ready for transmission but could not be sent", "Desc": "Data Flit Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_DATA_FLITS_NOT_SENT.NO_TXQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Data flit is ready for transmission but could not be sent", "Desc": "Data Flit Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_DATA_FLITS_NOT_SENT.TSV_HI": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Data flit is ready for transmission but could not be sent", "Desc": "Data Flit Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_DATA_FLITS_NOT_SENT.ALL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Data flit is ready for transmission but could not be sent", "Desc": "Data Flit Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_DATA_FLITS_NOT_SENT.VALID_FOR_FLIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Data flit is ready for transmission but could not be sent", "Desc": "Data Flit Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLITS_GEN_BL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", 
"EvSel": 87, "ExtSel": "", }, "M3UPI.RxC_FLITS_GEN_BL.P1P_HOLD_P0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 87, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_FLITS_GEN_BL.P1P_BUSY": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 87, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_FLITS_GEN_BL.P0_WAIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLITS_GEN_BL.P1P_AT_LIMIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 87, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_FLITS_GEN_BL.P1_WAIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLITS_GEN_BL.P1P_TO_LIMBO": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLITS_GEN_BL.P1P_FIFO_FULL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 87, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_FLITS_MISC": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "EvSel": 88, "ExtSel": "", }, "M3UPI.RxC_FLITS_MISC.S2REQ_WITHDRAWN": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLITS_MISC.S2REQ_RECEIVED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLITS_MISC.S2REQ_IN_SERVICE": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "EvSel": 88, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_FLITS_MISC.S2REQ_IN_HOLDOFF": { "Box": "M3UPI", 
"Category": "M3UPI INGRESS Flit Events", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLITS_SLOT_BL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 86, "ExtSel": "", }, "M3UPI.RxC_FLITS_SLOT_BL.ALL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLITS_SLOT_BL.P1_NOT_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_FLITS_SLOT_BL.P0_WAIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLITS_SLOT_BL.P1_WAIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_FLITS_SLOT_BL.NEED_DATA": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_FLIT_GEN_HDR1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 81, "ExtSel": "", }, "M3UPI.RxC_FLIT_GEN_HDR1.ACCUM_WASTED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation 
- Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 81, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLIT_GEN_HDR1.AHEAD_MSG2_SENT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 81, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_FLIT_GEN_HDR1.AHEAD_MSG1_DURING": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 81, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_FLIT_GEN_HDR1.AHEAD_MSG2_AFTER": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 81, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_FLIT_GEN_HDR1.ACCUM": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 81, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLIT_GEN_HDR1.ACCUM_READ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 81, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLIT_GEN_HDR1.AHEAD_MSG1_AFTER": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 81, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 81, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_FLIT_GEN_HDR2": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 2", "Desc": "Flit Gen - Header 2", "EvSel": 82, "ExtSel": "", 
}, "M3UPI.RxC_FLIT_GEN_HDR2.PAR": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 2", "Desc": "Flit Gen - Header 2", "EvSel": 82, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLIT_GEN_HDR2.PAR_MSG": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 2", "Desc": "Flit Gen - Header 2", "EvSel": 82, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_FLIT_GEN_HDR2.PAR_FLIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 2", "Desc": "Flit Gen - Header 2", "EvSel": 82, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 2", "Desc": "Flit Gen - Header 2", "EvSel": 82, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLIT_GEN_HDR2.RMSTALL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 2", "Desc": "Flit Gen - Header 2", "EvSel": 82, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_HDR_FLITS_SENT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 84, "ExtSel": "", }, "M3UPI.RxC_HDR_FLITS_SENT.1_MSG": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_HDR_FLITS_SENT.2_MSGS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_HDR_FLITS_SENT.1_MSG_VNX": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 84, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_HDR_FLITS_SENT.SLOTS_3": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 84, 
"ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_HDR_FLITS_SENT.SLOTS_2": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 84, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_HDR_FLITS_SENT.3_MSGS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_HDR_FLITS_SENT.SLOTS_1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 84, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_HDR_FLIT_NOT_SENT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 83, "ExtSel": "", }, "M3UPI.RxC_HDR_FLIT_NOT_SENT.ALL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_HDR_FLIT_NOT_SENT.NO_TXQ_NO_MSG": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 83, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_HDR_FLIT_NOT_SENT.VALID_FOR_FLIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_HDR_FLIT_NOT_SENT.NO_BGF_NO_MSG": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 83, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_HDR_FLIT_NOT_SENT.NO_BGF_CRD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 83, 
"ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_HDR_FLIT_NOT_SENT.TSV_HI": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_HDR_FLIT_NOT_SENT.NO_TXQ_CRD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 83, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_HELD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 80, "ExtSel": "", }, "M3UPI.RxC_HELD.VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_HELD.CANT_SLOT_AD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 80, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_HELD.VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_HELD.PARALLEL_SUCCESS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 80, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_HELD.PARALLEL_ATTEMPT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_HELD.CANT_SLOT_BL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 80, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_INSERTS_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. 
This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Notes": "subevents added to 5b?", }, "M3UPI.RxC_INSERTS_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_INSERTS_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_INSERTS_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_INSERTS_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_INSERTS_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_INSERTS_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_INSERTS_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_INSERTS_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Notes": "subevents added to 5b?", }, "M3UPI.RxC_INSERTS_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_INSERTS_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_INSERTS_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_INSERTS_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_INSERTS_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_INSERTS_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_INSERTS_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_OCCUPANCY_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", }, "M3UPI.RxC_OCCUPANCY_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_OCCUPANCY_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_OCCUPANCY_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. 
This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_OCCUPANCY_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_OCCUPANCY_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_OCCUPANCY_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_OCCUPANCY_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. 
This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_OCCUPANCY_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", }, "M3UPI.RxC_OCCUPANCY_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_OCCUPANCY_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_OCCUPANCY_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. 
This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_OCCUPANCY_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_OCCUPANCY_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_OCCUPANCY_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. 
This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_OCCUPANCY_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_PACKING_MISS_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", }, "M3UPI.RxC_PACKING_MISS_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_PACKING_MISS_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_PACKING_MISS_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS 
Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_PACKING_MISS_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_PACKING_MISS_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_PACKING_MISS_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_PACKING_MISS_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_PACKING_MISS_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets 
to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", }, "M3UPI.RxC_PACKING_MISS_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_PACKING_MISS_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_PACKING_MISS_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_PACKING_MISS_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_PACKING_MISS_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL 
which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_PACKING_MISS_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_PACKING_MISS_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_VNA_CRD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 90, "ExtSel": "", }, "M3UPI.RxC_VNA_CRD.LT10": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 90, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_VNA_CRD.ANY_IN_USE": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 90, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_VNA_CRD.CORRECTED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_VNA_CRD.LT1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_VNA_CRD.LT5": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 90, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_VNA_CRD.LT4": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA 
Credits", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_VNA_CRD_MISC": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "EvSel": 89, "ExtSel": "", }, "M3UPI.RxC_VNA_CRD_MISC.VN0_JUST_AD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "EvSel": 89, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_VNA_CRD_MISC.VN1_JUST_AD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "EvSel": 89, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_VNA_CRD_MISC.VN1_JUST_BL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "EvSel": 89, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.RxC_VNA_CRD_MISC.VN0_JUST_BL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "EvSel": 89, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_VNA_CRD_MISC.VN1_ONLY": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "EvSel": 89, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "EvSel": 89, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "EvSel": 89, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_VNA_CRD_MISC.VN0_ONLY": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "EvSel": 89, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxR_BUSY_STARVED": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. 
Selecting multiple ring types NOT supported", }, "M3UPI.RxR_BUSY_STARVED.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M3UPI.RxR_BUSY_STARVED.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M3UPI.RxR_BUSY_STARVED.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M3UPI.RxR_BUSY_STARVED.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. 
This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M3UPI.RxR_BUSY_STARVED.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M3UPI.RxR_BUSY_STARVED.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 229, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M3UPI.RxR_BYPASS": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", }, "M3UPI.RxR_BYPASS.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M3UPI.RxR_BYPASS.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M3UPI.RxR_BYPASS.AKC_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M3UPI.RxR_BYPASS.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M3UPI.RxR_BYPASS.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M3UPI.RxR_BYPASS.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M3UPI.RxR_BYPASS.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M3UPI.RxR_BYPASS.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M3UPI.RxR_BYPASS.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 226, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M3UPI.RxR_CRD_STARVED": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", }, "M3UPI.RxR_CRD_STARVED.IFV": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M3UPI.RxR_CRD_STARVED.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M3UPI.RxR_CRD_STARVED.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M3UPI.RxR_CRD_STARVED.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M3UPI.RxR_CRD_STARVED.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M3UPI.RxR_CRD_STARVED.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M3UPI.RxR_CRD_STARVED.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M3UPI.RxR_CRD_STARVED.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M3UPI.RxR_CRD_STARVED.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 227, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M3UPI.RxR_CRD_STARVED_1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 228, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", }, "M3UPI.RxR_INSERTS": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "M3UPI.RxR_INSERTS.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M3UPI.RxR_INSERTS.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M3UPI.RxR_INSERTS.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000000", }, "M3UPI.RxR_INSERTS.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M3UPI.RxR_INSERTS.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M3UPI.RxR_INSERTS.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M3UPI.RxR_INSERTS.AKC_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M3UPI.RxR_INSERTS.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M3UPI.RxR_INSERTS.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 225, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M3UPI.RxR_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "M3UPI.RxR_OCCUPANCY.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000100", }, "M3UPI.RxR_OCCUPANCY.AKC_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b10000000", }, "M3UPI.RxR_OCCUPANCY.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00001000", }, "M3UPI.RxR_OCCUPANCY.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000001", }, "M3UPI.RxR_OCCUPANCY.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00000010", }, "M3UPI.RxR_OCCUPANCY.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010001", }, "M3UPI.RxR_OCCUPANCY.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "b00100000", }, "M3UPI.RxR_OCCUPANCY.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b00010000", }, "M3UPI.RxR_OCCUPANCY.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "b01000100", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", 
"Umask": "bxx1xxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per 
transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is 
stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", 
"Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxxx1", }, 
"M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress 
Credits", "EvSel": 214, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for 
a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 209, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 
Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 211, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 213, "ExtSel": "", "Notes": "Extension not used by 
ICX.", "Umask": "bxxxxxxx1", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxxx1", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxxx1x", }, "M3UPI.STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 215, "ExtSel": "", "Notes": "Extension not used by ICX.", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_ARB_FAIL": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Notes": "subevents added to 5b?", }, "M3UPI.TxC_AD_ARB_FAIL.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", 
"Notes": "subevents added to 5b?", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_ARB_FAIL.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_ARB_FAIL.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_ARB_FAIL.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_ARB_FAIL.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_ARB_FAIL.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_AD_ARB_FAIL.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_ARB_FAIL.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_AD_FLQ_BYPASS": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", 
"Defn": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)", "Desc": "AD FlowQ Bypass", "EvSel": 44, "ExtSel": "", }, "M3UPI.TxC_AD_FLQ_BYPASS.AD_SLOT0": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)", "Desc": "AD FlowQ Bypass", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_FLQ_BYPASS.BL_EARLY_RSP": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)", "Desc": "AD FlowQ Bypass", "EvSel": 44, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_FLQ_BYPASS.AD_SLOT1": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)", "Desc": "AD FlowQ Bypass", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_FLQ_BYPASS.AD_SLOT2": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)", "Desc": "AD FlowQ Bypass", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_FLQ_INSERTS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 3, }, "M3UPI.TxC_AD_FLQ_INSERTS.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. 
Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 3, "Umask": "bx1xxxxxx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 8, }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bx1xxxxxx", }, "M3UPI.TxC_AK_FLQ_INSERTS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Desc": "AK Flow Q Inserts", "EvSel": 47, "ExtSel": "", "MaxIncCyc": 2, }, "M3UPI.TxC_AK_FLQ_OCCUPANCY": { "Box": "M3UPI", "Category": 
"M3UPI FlowQ Events", "Counters": 0, "Desc": "AK Flow Q Occupancy", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 8, }, "M3UPI.TxC_BL_ARB_FAIL": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Notes": "subevents added to 5b?", }, "M3UPI.TxC_BL_ARB_FAIL.VN0_NCB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_BL_ARB_FAIL.VN1_NCS": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_BL_ARB_FAIL.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_BL_ARB_FAIL.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_BL_ARB_FAIL.VN0_NCS": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_BL_ARB_FAIL.VN1_NCB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_BL_ARB_FAIL.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", 
"Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_BL_ARB_FAIL.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Notes": "subevents added to 5b?", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. 
This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_BL_FLQ_INSERTS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN1_NCS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN0_NCB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN1_NCB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN0_NCS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 8, }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxxx1xxxx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN0_NCS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxxxx1xxx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN1_NCB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bx1xxxxxx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxx1xxxxx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 8, 
"Umask": "bxxxxxxx1", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN1_NCS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 8, "Umask": "b1xxxxxxx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN0_NCB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxxxxx1xx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 8, "Umask": "bxxxxxx1x", }, "M3UPI.TxC_BL_WB_FLQ_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, }, "M3UPI.TxC_BL_WB_FLQ_OCCUPANCY.VN1_THROUGH": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "Umask": "b00100000", }, "M3UPI.TxC_BL_WB_FLQ_OCCUPANCY.VN0_THROUGH": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "Umask": "b00000010", }, "M3UPI.TxC_BL_WB_FLQ_OCCUPANCY.VN0_LOCAL": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "Umask": "b00000001", }, "M3UPI.TxC_BL_WB_FLQ_OCCUPANCY.VN1_LOCAL": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "Umask": "b00010000", }, "M3UPI.TxC_BL_WB_FLQ_OCCUPANCY.VN0_WRPULL": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "Umask": "b00000100", }, "M3UPI.TxC_BL_WB_FLQ_OCCUPANCY.VN1_WRPULL": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q 
Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "Umask": "b01000000", }, "M3UPI.TxR_HORZ_ADS_USED": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", }, "M3UPI.TxR_HORZ_ADS_USED.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b01000100", }, "M3UPI.TxR_HORZ_ADS_USED.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.TxR_HORZ_ADS_USED.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00010000", }, "M3UPI.TxR_HORZ_ADS_USED.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00000100", }, "M3UPI.TxR_HORZ_ADS_USED.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b01000000", }, "M3UPI.TxR_HORZ_ADS_USED.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the 
Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 166, "ExtSel": "", "Umask": "b00010001", }, "M3UPI.TxR_HORZ_BYPASS": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", }, "M3UPI.TxR_HORZ_BYPASS.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00010000", }, "M3UPI.TxR_HORZ_BYPASS.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b01000100", }, "M3UPI.TxR_HORZ_BYPASS.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.TxR_HORZ_BYPASS.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00010001", }, "M3UPI.TxR_HORZ_BYPASS.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b01000000", }, "M3UPI.TxR_HORZ_BYPASS.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": 
"Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.TxR_HORZ_BYPASS.AKC_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b10000000", }, "M3UPI.TxR_HORZ_BYPASS.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00000100", }, "M3UPI.TxR_HORZ_BYPASS.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 167, "ExtSel": "", "Umask": "b00001000", }, "M3UPI.TxR_HORZ_CYCLES_FULL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", }, "M3UPI.TxR_HORZ_CYCLES_FULL.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b01000100", }, "M3UPI.TxR_HORZ_CYCLES_FULL.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00010000", }, "M3UPI.TxR_HORZ_CYCLES_FULL.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b01000000", }, "M3UPI.TxR_HORZ_CYCLES_FULL.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00010001", }, "M3UPI.TxR_HORZ_CYCLES_FULL.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.TxR_HORZ_CYCLES_FULL.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.TxR_HORZ_CYCLES_FULL.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00000100", }, "M3UPI.TxR_HORZ_CYCLES_FULL.AKC_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b10000000", }, "M3UPI.TxR_HORZ_CYCLES_FULL.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 162, "ExtSel": "", "Umask": "b00001000", }, "M3UPI.TxR_HORZ_CYCLES_NE": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", }, "M3UPI.TxR_HORZ_CYCLES_NE.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.TxR_HORZ_CYCLES_NE.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00010001", }, "M3UPI.TxR_HORZ_CYCLES_NE.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b01000000", }, "M3UPI.TxR_HORZ_CYCLES_NE.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00010000", }, "M3UPI.TxR_HORZ_CYCLES_NE.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b01000100", }, "M3UPI.TxR_HORZ_CYCLES_NE.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00001000", }, "M3UPI.TxR_HORZ_CYCLES_NE.AKC_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b10000000", }, "M3UPI.TxR_HORZ_CYCLES_NE.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000100", }, "M3UPI.TxR_HORZ_CYCLES_NE.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 163, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.TxR_HORZ_INSERTS": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", }, "M3UPI.TxR_HORZ_INSERTS.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b01000100", }, "M3UPI.TxR_HORZ_INSERTS.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, 
"ExtSel": "", "Umask": "b00010000", }, "M3UPI.TxR_HORZ_INSERTS.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b01000000", }, "M3UPI.TxR_HORZ_INSERTS.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.TxR_HORZ_INSERTS.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00010001", }, "M3UPI.TxR_HORZ_INSERTS.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.TxR_HORZ_INSERTS.AKC_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b10000000", }, "M3UPI.TxR_HORZ_INSERTS.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations 
into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00001000", }, "M3UPI.TxR_HORZ_INSERTS.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 161, "ExtSel": "", "Umask": "b00000100", }, "M3UPI.TxR_HORZ_NACK": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", }, "M3UPI.TxR_HORZ_NACK.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.TxR_HORZ_NACK.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000100", }, "M3UPI.TxR_HORZ_NACK.AKC_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b10000000", }, "M3UPI.TxR_HORZ_NACK.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00001000", }, "M3UPI.TxR_HORZ_NACK.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS 
Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b01000100", }, "M3UPI.TxR_HORZ_NACK.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00010000", }, "M3UPI.TxR_HORZ_NACK.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b01000000", }, "M3UPI.TxR_HORZ_NACK.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.TxR_HORZ_NACK.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 164, "ExtSel": "", "Umask": "b00010001", }, "M3UPI.TxR_HORZ_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", }, "M3UPI.TxR_HORZ_OCCUPANCY.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b01000100", }, "M3UPI.TxR_HORZ_OCCUPANCY.AD_CRD": { "Box": "M3UPI", 
"Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00010000", }, "M3UPI.TxR_HORZ_OCCUPANCY.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b01000000", }, "M3UPI.TxR_HORZ_OCCUPANCY.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00010001", }, "M3UPI.TxR_HORZ_OCCUPANCY.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.TxR_HORZ_OCCUPANCY.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.TxR_HORZ_OCCUPANCY.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the 
Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00001000", }, "M3UPI.TxR_HORZ_OCCUPANCY.AKC_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b10000000", }, "M3UPI.TxR_HORZ_OCCUPANCY.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 160, "ExtSel": "", "Umask": "b00000100", }, "M3UPI.TxR_HORZ_STARVED": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", }, "M3UPI.TxR_HORZ_STARVED.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00001000", }, "M3UPI.TxR_HORZ_STARVED.AKC_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b10000000", }, "M3UPI.TxR_HORZ_STARVED.BL_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000100", }, "M3UPI.TxR_HORZ_STARVED.AD_UNCRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.TxR_HORZ_STARVED.AD_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00010001", }, "M3UPI.TxR_HORZ_STARVED.AK": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.TxR_HORZ_STARVED.BL_ALL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 165, "ExtSel": "", "Umask": "b01000100", }, "M3UPI.TxR_VERT_ADS_USED": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", }, "M3UPI.TxR_VERT_ADS_USED.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_ADS_USED.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_ADS_USED.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_ADS_USED.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_BYPASS": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", }, "M3UPI.TxR_VERT_BYPASS.IV_AG1": { "Box": "M3UPI", 
"Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_BYPASS.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_BYPASS.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_BYPASS.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_BYPASS.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_BYPASS.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_BYPASS.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_BYPASS_1": { "Box": "M3UPI", "Category": 
"M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", }, "M3UPI.TxR_VERT_BYPASS_1.AKC_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_BYPASS_1.AKC_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_CYCLES_FULL0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", }, "M3UPI.TxR_VERT_CYCLES_FULL0.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_CYCLES_FULL0.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_CYCLES_FULL0.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_CYCLES_FULL0.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_CYCLES_FULL0.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_CYCLES_FULL0.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_CYCLES_FULL0.IV_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 148, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_CYCLES_FULL1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", }, "M3UPI.TxR_VERT_CYCLES_FULL1.AKC_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_CYCLES_FULL1.AKC_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_CYCLES_NE0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", }, "M3UPI.TxR_VERT_CYCLES_NE0.IV_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_CYCLES_NE0.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_CYCLES_NE0.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_CYCLES_NE0.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_CYCLES_NE0.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_CYCLES_NE0.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_CYCLES_NE0.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 150, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_CYCLES_NE1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", }, "M3UPI.TxR_VERT_CYCLES_NE1.AKC_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_CYCLES_NE1.AKC_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_INSERTS0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", }, "M3UPI.TxR_VERT_INSERTS0.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_INSERTS0.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_INSERTS0.IV_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_INSERTS0.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_INSERTS0.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_INSERTS0.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_INSERTS0.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 146, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_INSERTS1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", }, "M3UPI.TxR_VERT_INSERTS1.AKC_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_INSERTS1.AKC_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_NACK0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", }, "M3UPI.TxR_VERT_NACK0.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_NACK0.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_NACK0.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_NACK0.IV_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_NACK0.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_NACK0.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxx1xxxxx", }, 
"M3UPI.TxR_VERT_NACK0.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_NACK1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", }, "M3UPI.TxR_VERT_NACK1.AKC_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_NACK1.AKC_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_OCCUPANCY0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", }, "M3UPI.TxR_VERT_OCCUPANCY0.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_OCCUPANCY0.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 
144, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_OCCUPANCY0.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_OCCUPANCY0.IV_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_OCCUPANCY0.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_OCCUPANCY0.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_OCCUPANCY0.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_OCCUPANCY1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up 
requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", }, "M3UPI.TxR_VERT_OCCUPANCY1.AKC_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_OCCUPANCY1.AKC_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_STARVED0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", }, "M3UPI.TxR_VERT_STARVED0.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_STARVED0.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_STARVED0.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_STARVED0.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_STARVED0.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_STARVED0.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_STARVED0.IV_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_STARVED1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", }, "M3UPI.TxR_VERT_STARVED1.TGC": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_STARVED1.AKC_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_STARVED1.AKC_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VNA": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxxxxxxx1", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bx1xxxxxx", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxxxx1xxx", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxx1xxxxx", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": 
"No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxxxxx1xx", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxxxxxx1x", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxxx1xxxx", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxxxxxx1x", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxxx1xxxx", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No 
credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxxxxx1xx", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VNA": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxxxxxxx1", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxx1xxxxx", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bx1xxxxxx", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxxxx1xxx", }, "M3UPI.UPI_PREFETCH_SPAWN": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Count cases where FlowQ causes spawn of Prefetch to iMC/SMI3 target", "Desc": "FlowQ Generated Prefetch", "EvSel": 41, "ExtSel": "", }, "M3UPI.VERT_RING_AD_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring 
stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", }, "M3UPI.VERT_RING_AD_IN_USE.DN_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VERT_RING_AD_IN_USE.UP_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. 
We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VERT_RING_AD_IN_USE.UP_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VERT_RING_AD_IN_USE.DN_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 176, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VERT_RING_AKC_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", }, "M3UPI.VERT_RING_AKC_IN_USE.DN_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. 
On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VERT_RING_AKC_IN_USE.UP_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VERT_RING_AKC_IN_USE.UP_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VERT_RING_AKC_IN_USE.DN_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AKC Ring In Use", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VERT_RING_AK_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", }, "M3UPI.VERT_RING_AK_IN_USE.DN_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VERT_RING_AK_IN_USE.UP_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VERT_RING_AK_IN_USE.UP_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VERT_RING_AK_IN_USE.DN_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 177, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VERT_RING_BL_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", }, "M3UPI.VERT_RING_BL_IN_USE.UP_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VERT_RING_BL_IN_USE.DN_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VERT_RING_BL_IN_USE.DN_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VERT_RING_BL_IN_USE.UP_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VERT_RING_IV_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", }, "M3UPI.VERT_RING_IV_IN_USE.UP": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VERT_RING_IV_IN_USE.DN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 179, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VERT_RING_TGC_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", }, "M3UPI.VERT_RING_TGC_IN_USE.UP_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VERT_RING_TGC_IN_USE.DN_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VERT_RING_TGC_IN_USE.DN_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VERT_RING_TGC_IN_USE.UP_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical TGC Ring In Use", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VN0_CREDITS_USED": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 91, "ExtSel": "", }, "M3UPI.VN0_CREDITS_USED.SNP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 91, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VN0_CREDITS_USED.WB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 91, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VN0_CREDITS_USED.NCB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 91, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.VN0_CREDITS_USED.NCS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 91, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.VN0_CREDITS_USED.REQ": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 91, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VN0_CREDITS_USED.RSP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 91, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VN0_NO_CREDITS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 93, "ExtSel": "", }, "M3UPI.VN0_NO_CREDITS.SNP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VN0_NO_CREDITS.WB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 93, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VN0_NO_CREDITS.NCB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 93, "ExtSel": "", "Umask": 
"bxxx1xxxx", }, "M3UPI.VN0_NO_CREDITS.NCS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 93, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.VN0_NO_CREDITS.REQ": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VN0_NO_CREDITS.RSP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VN1_CREDITS_USED": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 92, "ExtSel": "", }, "M3UPI.VN1_CREDITS_USED.SNP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. 
VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VN1_CREDITS_USED.WB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VN1_CREDITS_USED.NCB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. 
VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.VN1_CREDITS_USED.NCS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.VN1_CREDITS_USED.REQ": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. 
VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VN1_CREDITS_USED.RSP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VN1_NO_CREDITS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 94, "ExtSel": "", }, "M3UPI.VN1_NO_CREDITS.WB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VN1_NO_CREDITS.SNP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VN1_NO_CREDITS.NCB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.VN1_NO_CREDITS.REQ": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VN1_NO_CREDITS.NCS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.VN1_NO_CREDITS.RSP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.WB_OCC_COMPARE": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", }, "M3UPI.WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b1xxxxxx1", }, 
"M3UPI.WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b1xx1xxxx", }, "M3UPI.WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b11xxxxxx", }, "M3UPI.WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b0xxxx1xx", }, "M3UPI.WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b0xxxxxx1", }, "M3UPI.WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b1x1xxxxx", }, "M3UPI.WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b01xxxxxx", }, "M3UPI.WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b0xx1xxxx", }, "M3UPI.WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b1xxxx1xx", }, "M3UPI.WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b1xxxxx1x", }, "M3UPI.WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b0xxxxx1x", }, "M3UPI.WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 126, "ExtSel": "", "Umask": "b0x1xxxxx", }, "M3UPI.WB_PENDING": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 125, "ExtSel": "", }, "M3UPI.WB_PENDING.LOCAL_AND_RT_VN1": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 125, "ExtSel": "", "Umask": "bx1xxxxxx", }, 
"M3UPI.WB_PENDING.WAITING4PULL_VN1": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 125, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.WB_PENDING.WAITING4PULL_VN0": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 125, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.WB_PENDING.LOCAL_AND_RT_VN0": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 125, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.WB_PENDING.LOCALDEST_VN0": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 125, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.WB_PENDING.ROUTETHRU_VN0": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 125, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.WB_PENDING.ROUTETHRU_VN1": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 125, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.WB_PENDING.LOCALDEST_VN1": { "Box": "M3UPI", "Category": "M3UPI Writeback Events", "EvSel": 125, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.XPT_PFTCH": { "Box": "M3UPI", "Category": "M3UPI XPT Events", "EvSel": 97, "ExtSel": "", }, "M3UPI.XPT_PFTCH.LOST_QFULL": { "Box": "M3UPI", "Category": "M3UPI XPT Events", "EvSel": 97, "ExtSel": "", "Umask": "bx1xxxxx", }, "M3UPI.XPT_PFTCH.LOST_ARB": { "Box": "M3UPI", "Category": "M3UPI XPT Events", "EvSel": 97, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.XPT_PFTCH.BYPASS": { "Box": "M3UPI", "Category": "M3UPI XPT Events", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.XPT_PFTCH.ARRIVED": { "Box": "M3UPI", "Category": "M3UPI XPT Events", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.XPT_PFTCH.LOST_OLD": { "Box": "M3UPI", "Category": "M3UPI XPT Events", "EvSel": 97, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.XPT_PFTCH.FLITTED": { "Box": "M3UPI", "Category": "M3UPI XPT Events", "EvSel": 97, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.XPT_PFTCH.ARB": { "Box": "M3UPI", "Category": "M3UPI XPT Events", "EvSel": 97, "ExtSel": "", "Umask": 
"bxxxxx1xx", }, # IIO: "IIO.CLOCKTICKS": { "Box": "IIO", "Category": "IIO CLOCK Events", "Counters": "0-3", "Defn": "Increments counter once every Traffic Controller clock, the LSCLK (500MHz)", "Desc": "Clockticks of the integrated IO (IIO) traffic controller", "EvSel": 129, "ExtSel": "", }, "IIO.COMP_BUF_INSERTS": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "0-3", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "ExtSel": "", "Notes": "aka OTC_ENQ.PREALLOC? (May need to redo)", }, "IIO.COMP_BUF_INSERTS.CMPD.PART4": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "0-3", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b100", "ExtSel": "", "Notes": "aka OTC_ENQ.PREALLOC? (May need to redo )", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxxx11", }, "IIO.COMP_BUF_INSERTS.CMPD.PART7": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "0-3", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b100", "ExtSel": "", "Notes": "aka OTC_ENQ.PREALLOC? (May need to redo )", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxxx11", }, "IIO.COMP_BUF_INSERTS.CMPD.PART1": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "0-3", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b100", "ExtSel": "", "Notes": "aka OTC_ENQ.PREALLOC? (May need to redo )", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxxx11", }, "IIO.COMP_BUF_INSERTS.CMPD.PART5": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "0-3", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b100", "ExtSel": "", "Notes": "aka OTC_ENQ.PREALLOC? 
(May need to redo )", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxxx11", }, "IIO.COMP_BUF_INSERTS.CMPD.PART3": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "0-3", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b100", "ExtSel": "", "Notes": "aka OTC_ENQ.PREALLOC? (May need to redo )", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxxx11", }, "IIO.COMP_BUF_INSERTS.CMPD.PART0": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "0-3", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b100", "ExtSel": "", "Notes": "aka OTC_ENQ.PREALLOC? (May need to redo )", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxxx11", }, "IIO.COMP_BUF_INSERTS.CMPD.PART2": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "0-3", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b100", "ExtSel": "", "Notes": "aka OTC_ENQ.PREALLOC? (May need to redo )", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxxx11", }, "IIO.COMP_BUF_INSERTS.CMPD.PART6": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "0-3", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b100", "ExtSel": "", "Notes": "aka OTC_ENQ.PREALLOC? (May need to redo )", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxxx11", }, "IIO.COMP_BUF_INSERTS.CMPD.ALL": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "0-3", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b100", "ExtSel": "", "Notes": "aka OTC_ENQ.PREALLOC? 
(May need to redo )", "PortMask": "bxxxx11111111", "Umask": "bxxxxxx11", }, "IIO.COMP_BUF_OCCUPANCY": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "2-3", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "ExtSel": "", }, "IIO.COMP_BUF_OCCUPANCY.CMPD.PART4": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "2-3", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "FCMask": "b100", "ExtSel": "", "PortMask": "bxxxxxxxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.COMP_BUF_OCCUPANCY.CMPD.PART7": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "2-3", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "FCMask": "b100", "ExtSel": "", "PortMask": "bxxxxxxxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.COMP_BUF_OCCUPANCY.CMPD.PART1": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "2-3", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "FCMask": "b100", "ExtSel": "", "PortMask": "bxxxxxxxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.COMP_BUF_OCCUPANCY.CMPD.PART3": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "2-3", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "FCMask": "b100", "ExtSel": "", "PortMask": "bxxxxxxxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.COMP_BUF_OCCUPANCY.CMPD.PART5": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "2-3", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "FCMask": "b100", "ExtSel": "", "PortMask": "bxxxxxxxxxxxx", "Umask": "bxx1xxxxx", }, "IIO.COMP_BUF_OCCUPANCY.CMPD.PART6": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "2-3", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "FCMask": "b100", "ExtSel": "", "PortMask": "bxxxxxxxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.COMP_BUF_OCCUPANCY.CMPD.PART0": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "2-3", "Desc": "PCIe Completion 
Buffer Occupancy", "EvSel": 213, "FCMask": "b100", "ExtSel": "", "PortMask": "bxxxxxxxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.COMP_BUF_OCCUPANCY.CMPD.PART2": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "2-3", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "FCMask": "b100", "ExtSel": "", "PortMask": "bxxxxxxxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.COMP_BUF_OCCUPANCY.CMPD.ALL": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Counters": "2-3", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "FCMask": "b100", "ExtSel": "", "PortMask": "bxxxxxxxxxxxx", "Umask": "b11111111", }, "IIO.DATA_REQ_BY_CPU": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of DWs (4 bytes) requested by the main die. 
Includes all requests initiated by the main die, including reads and writes.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_OF_CPU": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.CMPD.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1xxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.CMPD.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1xxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.MSG.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.MSG.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.CMPD.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xxxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.MSG.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.CMPD.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxx1xx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.MSG.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.CMPD.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.MSG.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.CMPD.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxxx1", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.CMPD.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxx1xxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.MSG.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.MSG.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.MSG.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART5": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.CMPD.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxx1x", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_OF_CPU.CMPD.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.MSG.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.CMPD.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxx1xxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART7": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. 
Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.MSG.PART6": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. 
Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART4": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 1024, "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxxx1x", }, "IIO.INBOUND_ARB_REQ": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) ask to send request into pipeline", "Desc": "Incoming arbitration requests", "EvSel": 134, "ExtSel": "", }, "IIO.INBOUND_ARB_REQ.DATA": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. 
channel / fc) ask to send request into pipeline", "Desc": "Incoming arbitration requests", "EvSel": 134, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxx1xxxxx", }, "IIO.INBOUND_ARB_REQ.IOMMU_HIT": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) ask to send request into pipeline", "Desc": "Incoming arbitration requests", "EvSel": 134, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxxx1x", }, "IIO.INBOUND_ARB_REQ.REQ_OWN": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) ask to send request into pipeline", "Desc": "Incoming arbitration requests", "EvSel": 134, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxx1xx", }, "IIO.INBOUND_ARB_REQ.WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) ask to send request into pipeline", "Desc": "Incoming arbitration requests", "EvSel": 134, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxx1xxxx", }, "IIO.INBOUND_ARB_REQ.IOMMU_REQ": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) ask to send request into pipeline", "Desc": "Incoming arbitration requests", "EvSel": 134, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxxxx1", }, "IIO.INBOUND_ARB_REQ.FINAL_RD_WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) ask to send request into pipeline", "Desc": "Incoming arbitration requests", "EvSel": 134, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxx1xxx", }, "IIO.INBOUND_ARB_WON": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. 
channel / fc) are allowed to send request into pipeline", "Desc": "Incoming arbitration requests granted", "EvSel": 135, "ExtSel": "", }, "IIO.INBOUND_ARB_WON.IOMMU_HIT": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) are allowed to send request into pipeline", "Desc": "Incoming arbitration requests granted", "EvSel": 135, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxxx1x", }, "IIO.INBOUND_ARB_WON.REQ_OWN": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) are allowed to send request into pipeline", "Desc": "Incoming arbitration requests granted", "EvSel": 135, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxx1xx", }, "IIO.INBOUND_ARB_WON.DATA": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) are allowed to send request into pipeline", "Desc": "Incoming arbitration requests granted", "EvSel": 135, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxx1xxxxx", }, "IIO.INBOUND_ARB_WON.FINAL_RD_WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) are allowed to send request into pipeline", "Desc": "Incoming arbitration requests granted", "EvSel": 135, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxx1xxx", }, "IIO.INBOUND_ARB_WON.IOMMU_REQ": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. 
channel / fc) are allowed to send request into pipeline", "Desc": "Incoming arbitration requests granted", "EvSel": 135, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxxxx1", }, "IIO.INBOUND_ARB_WON.WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "How often different queues (e.g. channel / fc) are allowed to send request into pipeline", "Desc": "Incoming arbitration requests granted", "EvSel": 135, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxx1xxxx", }, "IIO.IOMMU0": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 64, "ExtSel": "", }, "IIO.IOMMU0.4K_HITS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IIO.IOMMU0.1G_HITS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 64, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IIO.IOMMU0.2M_HITS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 64, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IIO.IOMMU0.MISSES": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 64, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IIO.IOMMU0.CTXT_CACHE_LOOKUPS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 64, "ExtSel": "", "Umask": "bx1xxxxxx", }, "IIO.IOMMU0.CTXT_CACHE_HITS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 64, "ExtSel": "", "Umask": "b1xxxxxxx", }, "IIO.IOMMU0.ALL_LOOKUPS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IIO.IOMMU0.FIRST_LOOKUPS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IIO.IOMMU1": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 65, "ExtSel": "", }, "IIO.IOMMU1.PWC_1G_HITS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 65, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IIO.IOMMU1.CYC_PWT_FULL": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 65, "ExtSel": "", "Umask": "b1xxxxxxx", }, 
"IIO.IOMMU1.NUM_MEM_ACCESSES": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 65, "ExtSel": "", "Umask": "bx1xxxxxx", }, "IIO.IOMMU1.PWC_2M_HITS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IIO.IOMMU1.PWT_CACHE_LOOKUPS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IIO.IOMMU1.PWC_512G_HITS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 65, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IIO.IOMMU1.PWC_4K_HITS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IIO.IOMMU1.PWC_CACHE_FILLS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 65, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IIO.IOMMU3": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 67, "ExtSel": "", }, "IIO.IOMMU3.NUM_INVAL_PAGE": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IIO.IOMMU3.NUM_CTXT_CACHE_INVAL_GBL": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 67, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IIO.IOMMU3.NUM_CTXT_CACHE_INVAL_DEVICE": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 67, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IIO.IOMMU3.NUM_CTXT_CACHE_INVAL_DOMAIN": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 67, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IIO.IOMMU3.INT_CACHE_HITS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 67, "ExtSel": "", "Umask": "b1xxxxxxx", }, "IIO.IOMMU3.INT_CACHE_LOOKUPS": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 67, "ExtSel": "", "Umask": "bx1xxxxxx", }, "IIO.IOMMU3.NUM_INVAL_DOMAIN": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IIO.IOMMU3.NUM_INVAL_GBL": { "Box": "IIO", "Category": "IIO IOMMU Events", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IIO.MASK_MATCH_AND": { "Box": "IIO", "Category": "IIO Debug 
Events", "Counters": "0-1", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", }, "IIO.MASK_MATCH_AND.NOT_BUS0_NOT_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IIO.MASK_MATCH_AND.BUS0_NOT_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IIO.MASK_MATCH_AND.BUS0_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IIO.MASK_MATCH_AND.BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IIO.MASK_MATCH_AND.BUS0": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IIO.MASK_MATCH_AND.NOT_BUS0_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IIO.MASK_MATCH_OR": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", }, "IIO.MASK_MATCH_OR.NOT_BUS0_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if any bits specified by mask 
match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IIO.MASK_MATCH_OR.BUS0": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IIO.MASK_MATCH_OR.BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IIO.MASK_MATCH_OR.NOT_BUS0_NOT_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IIO.MASK_MATCH_OR.BUS0_NOT_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IIO.MASK_MATCH_OR.BUS0_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-1", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IIO.NOTHING": { "Box": "IIO", "Category": "IIO CLOCK Events", "Counters": "0-3", "Desc": "Counting disabled", "EvSel": 128, "ExtSel": "", }, "IIO.NUM_OUSTANDING_REQ_FROM_CPU": { "Box": "IIO", "Category": "IIO OTC Events", "Counters": "2-3", "Defn": "Counts number of outbound requests/completions IIO is currently processing", "Desc": "Occupancy of outbound request queue", "EvSel": 197, "ExtSel": "", "MaxIncCyc": 256, "Notes": "Only 1 bit each of the fc_mask and ch_mask can be set", }, "IIO.NUM_OUSTANDING_REQ_FROM_CPU.TO_IO": { "Box": "IIO", "Category": "IIO OTC Events", "Counters": "2-3", "Defn": "Counts number of outbound requests/completions IIO is currently 
processing", "Desc": "Occupancy of outbound request queue", "EvSel": 197, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 256, "Notes": "Only 1 bit each of the fc_mask and ch_mask can be set", "PortMask": "bxxxx11111111", "Umask": "bxxxx1xxx", }, "IIO.NUM_OUTSTANDING_REQ_OF_CPU": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "2-3", "EvSel": 136, "ExtSel": "", "MaxIncCyc": 512, }, "IIO.NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "2-3", "EvSel": 136, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 512, "PortMask": "bxxxx11111111", "Umask": "bxxxx1xxx", }, "IIO.NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "2-3", "EvSel": 136, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 512, "PortMask": "bxxxx11111111", "Umask": "bxxxxxxx1", }, "IIO.NUM_OUTSTANDING_REQ_OF_CPU.WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "2-3", "EvSel": 136, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 512, "PortMask": "bxxxx11111111", "Umask": "bxxx1xxxx", }, "IIO.NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "2-3", "EvSel": 136, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 512, "PortMask": "bxxxx11111111", "Umask": "bxxxxxx1x", }, "IIO.NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "2-3", "EvSel": 136, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 512, "PortMask": "bxxxx11111111", "Umask": "bxxxxx1xx", }, "IIO.NUM_OUTSTANDING_REQ_OF_CPU.DATA": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "2-3", "EvSel": 136, "FCMask": "b111", "ExtSel": "", "MaxIncCyc": 512, "PortMask": "bxxxx11111111", "Umask": "bxx1xxxxx", }, "IIO.NUM_REQ_FROM_CPU": { "Box": "IIO", "Category": "IIO OTC Events", "Counters": "0-3", "Desc": "Number requests sent to PCIe from main die", "EvSel": 194, "ExtSel": "", }, "IIO.NUM_REQ_FROM_CPU.PREALLOC": { "Box": "IIO", "Category": "IIO OTC Events", 
"Counters": "0-3", "Desc": "Number requests sent to PCIe from main die", "EvSel": 194, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxx1xx", }, "IIO.NUM_REQ_FROM_CPU.IRP": { "Box": "IIO", "Category": "IIO OTC Events", "Counters": "0-3", "Desc": "Number requests sent to PCIe from main die", "EvSel": 194, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxxxx1", }, "IIO.NUM_REQ_FROM_CPU.ITC": { "Box": "IIO", "Category": "IIO OTC Events", "Counters": "0-3", "Desc": "Number requests sent to PCIe from main die", "EvSel": 194, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxxx1x", }, "IIO.NUM_REQ_OF_CPU": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Counts full PCIe requests before they're broken into a series of cache-line size requests as measured by DATA_REQ_OF_CPU and TXN_REQ_OF_CPU.", "Desc": "Number requests PCIe makes of the main die", "EvSel": 133, "ExtSel": "", }, "IIO.NUM_REQ_OF_CPU.ALL.DROP": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Counts full PCIe requests before they're broken into a series of cache-line size requests as measured by DATA_REQ_OF_CPU and TXN_REQ_OF_CPU.", "Desc": "Number requests PCIe makes of the main die", "EvSel": 133, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxxx1x", }, "IIO.NUM_REQ_OF_CPU.COMMIT.ALL": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Counts full PCIe requests before they're broken into a series of cache-line size requests as measured by DATA_REQ_OF_CPU and TXN_REQ_OF_CPU.", "Desc": "Number requests PCIe makes of the main die", "EvSel": 133, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxxxx1", }, "IIO.NUM_REQ_OF_CPU_BY_TGT": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Desc": "Num requests sent by PCIe - by target", "EvSel": 142, "ExtSel": "", }, 
"IIO.NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Desc": "Num requests sent by PCIe - by target", "EvSel": 142, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bx1xxxxxx", }, "IIO.NUM_REQ_OF_CPU_BY_TGT.MCAST": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Desc": "Num requests sent by PCIe - by target", "EvSel": 142, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxxx1x", }, "IIO.NUM_REQ_OF_CPU_BY_TGT.ABORT": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Desc": "Num requests sent by PCIe - by target", "EvSel": 142, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "b1xxxxxxx", }, "IIO.NUM_REQ_OF_CPU_BY_TGT.UBOX": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Desc": "Num requests sent by PCIe - by target", "EvSel": 142, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxx1xx", }, "IIO.NUM_REQ_OF_CPU_BY_TGT.MSGB": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Desc": "Num requests sent by PCIe - by target", "EvSel": 142, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxxxxx1", }, "IIO.NUM_REQ_OF_CPU_BY_TGT.LOC_P2P": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Desc": "Num requests sent by PCIe - by target", "EvSel": 142, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxx1xxxxx", }, "IIO.NUM_REQ_OF_CPU_BY_TGT.REM_P2P": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Desc": "Num requests sent by PCIe - by target", "EvSel": 142, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxx1xxxx", }, "IIO.NUM_REQ_OF_CPU_BY_TGT.MEM": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Desc": "Num requests sent by PCIe - by target", "EvSel": 142, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxx1xxx", }, 
"IIO.NUM_TGT_MATCHED_REQ_OF_CPU": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Desc": "ITC address map 1", "EvSel": 143, "ExtSel": "", }, "IIO.OUTBOUND_CL_REQS_ISSUED": { "Box": "IIO", "Category": "IIO OTC Events", "Counters": "0-3", "Defn": "Each outbound cacheline granular request may need to make multiple passes through the pipeline. Each time a cacheline completes all its passes it advances line", "Desc": "Outbound cacheline requests issued", "EvSel": 208, "ExtSel": "", }, "IIO.OUTBOUND_CL_REQS_ISSUED.TO_IO": { "Box": "IIO", "Category": "IIO OTC Events", "Counters": "0-3", "Defn": "Each outbound cacheline granular request may need to make multiple passes through the pipeline. Each time a cacheline completes all its passes it advances line", "Desc": "Outbound cacheline requests issued", "EvSel": 208, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxx1xxx", }, "IIO.OUTBOUND_TLP_REQS_ISSUED": { "Box": "IIO", "Category": "IIO OTC Events", "Counters": "0-3", "Defn": "Each time an outbound completes all its passes it advances the pointer", "Desc": "Outbound TLP (transaction layer packet) requests issued", "EvSel": 209, "ExtSel": "", }, "IIO.OUTBOUND_TLP_REQS_ISSUED.TO_IO": { "Box": "IIO", "Category": "IIO OTC Events", "Counters": "0-3", "Defn": "Each time an outbound completes all its passes it advances the pointer", "Desc": "Outbound TLP (transaction layer packet) requests issued", "EvSel": 209, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx11111111", "Umask": "bxxxx1xxx", }, "IIO.PWT_OCCUPANCY": { "Box": "IIO", "Category": "IIO IOMMU Events", "Defn": "Indicates how many page walks are outstanding at any point in time.", "Desc": "PWT occupancy", "EvSel": 66, "ExtSel": "", }, "IIO.REQ_FROM_PCIE_CL_CMPL": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes 
through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line", "Desc": "PCIe Request - cacheline complete", "EvSel": 145, "ExtSel": "", "Notes": "For a normal write (no posted interrupt, no multi-cast) advance line = advance state. For a PCIe request of <= cacheline, advance pointer = advance line", }, "IIO.REQ_FROM_PCIE_CL_CMPL.DATA": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line", "Desc": "PCIe Request - cacheline complete", "EvSel": 145, "FCMask": "b111", "ExtSel": "", "Notes": "For a normal write (no posted interrupt, no multi-cast) advance line = advance state. For a PCIe request of <= cacheline, advance pointer = advance line", "PortMask": "bxxxx11111111", "Umask": "bxx1xxxxx", }, "IIO.REQ_FROM_PCIE_CL_CMPL.WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line", "Desc": "PCIe Request - cacheline complete", "EvSel": 145, "FCMask": "b111", "ExtSel": "", "Notes": "For a normal write (no posted interrupt, no multi-cast) advance line = advance state. 
For a PCIe request of <= cacheline, advance pointer = advance line", "PortMask": "bxxxx11111111", "Umask": "bxxx1xxxx", }, "IIO.REQ_FROM_PCIE_CL_CMPL.REQ_OWN": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line", "Desc": "PCIe Request - cacheline complete", "EvSel": 145, "FCMask": "b111", "ExtSel": "", "Notes": "For a normal write (no posted interrupt, no multi-cast) advance line = advance state. For a PCIe request of <= cacheline, advance pointer = advance line", "PortMask": "bxxxx11111111", "Umask": "bxxxxx1xx", }, "IIO.REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line", "Desc": "PCIe Request - cacheline complete", "EvSel": 145, "FCMask": "b111", "ExtSel": "", "Notes": "For a normal write (no posted interrupt, no multi-cast) advance line = advance state. For a PCIe request of <= cacheline, advance pointer = advance line", "PortMask": "bxxxx11111111", "Umask": "bxxxx1xxx", }, "IIO.REQ_FROM_PCIE_CMPL": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). 
Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.", "Desc": "PCIe Request complete", "EvSel": 146, "ExtSel": "", "Notes": "For a PCIe request of <= cacheline, advance pointer = advance line", }, "IIO.REQ_FROM_PCIE_CMPL.IOMMU_HIT": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.", "Desc": "PCIe Request complete", "EvSel": 146, "FCMask": "b111", "ExtSel": "", "Notes": "For a PCIe request of <= cacheline, advance pointer = advance line", "PortMask": "bxxxx11111111", "Umask": "bxxxxxx1x", }, "IIO.REQ_FROM_PCIE_CMPL.REQ_OWN": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.", "Desc": "PCIe Request complete", "EvSel": 146, "FCMask": "b111", "ExtSel": "", "Notes": "For a PCIe request of <= cacheline, advance pointer = advance line", "PortMask": "bxxxx11111111", "Umask": "bxxxxx1xx", }, "IIO.REQ_FROM_PCIE_CMPL.DATA": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). 
Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.", "Desc": "PCIe Request complete", "EvSel": 146, "FCMask": "b111", "ExtSel": "", "Notes": "For a PCIe request of <= cacheline, advance pointer = advance line", "PortMask": "bxxxx11111111", "Umask": "bxx1xxxxx", }, "IIO.REQ_FROM_PCIE_CMPL.FINAL_RD_WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.", "Desc": "PCIe Request complete", "EvSel": 146, "FCMask": "b111", "ExtSel": "", "Notes": "For a PCIe request of <= cacheline, advance pointer = advance line", "PortMask": "bxxxx11111111", "Umask": "bxxxx1xxx", }, "IIO.REQ_FROM_PCIE_CMPL.IOMMU_REQ": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.", "Desc": "PCIe Request complete", "EvSel": 146, "FCMask": "b111", "ExtSel": "", "Notes": "For a PCIe request of <= cacheline, advance pointer = advance line", "PortMask": "bxxxx11111111", "Umask": "bxxxxxxx1", }, "IIO.REQ_FROM_PCIE_CMPL.WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). 
Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.", "Desc": "PCIe Request complete", "EvSel": 146, "FCMask": "b111", "ExtSel": "", "Notes": "For a PCIe request of <= cacheline, advance pointer = advance line", "PortMask": "bxxxx11111111", "Umask": "bxxx1xxxx", }, "IIO.REQ_FROM_PCIE_PASS_CMPL": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state", "Desc": "PCIe Request - pass complete", "EvSel": 144, "ExtSel": "", "Notes": "For a normal write (no posted interrupt, no multi-cast) advance line = advance state", }, "IIO.REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state", "Desc": "PCIe Request - pass complete", "EvSel": 144, "FCMask": "b111", "ExtSel": "", "Notes": "For a normal write (no posted interrupt, no multi-cast) advance line = advance state", "PortMask": "bxxxx11111111", "Umask": "bxxxx1xxx", }, "IIO.REQ_FROM_PCIE_PASS_CMPL.WR": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. 
posts a write to single multi-cast target) it advances state", "Desc": "PCIe Request - pass complete", "EvSel": 144, "FCMask": "b111", "ExtSel": "", "Notes": "For a normal write (no posted interrupt, no multi-cast) advance line = advance state", "PortMask": "bxxxx11111111", "Umask": "bxxx1xxxx", }, "IIO.REQ_FROM_PCIE_PASS_CMPL.DATA": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state", "Desc": "PCIe Request - pass complete", "EvSel": 144, "FCMask": "b111", "ExtSel": "", "Notes": "For a normal write (no posted interrupt, no multi-cast) advance line = advance state", "PortMask": "bxxxx11111111", "Umask": "bxx1xxxxx", }, "IIO.REQ_FROM_PCIE_PASS_CMPL.REQ_OWN": { "Box": "IIO", "Category": "IIO ITC Events", "Counters": "0-3", "Defn": "Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. 
posts a write to single multi-cast target) it advances state", "Desc": "PCIe Request - pass complete", "EvSel": 144, "FCMask": "b111", "ExtSel": "", "Notes": "For a normal write (no posted interrupt, no multi-cast) advance line = advance state", "PortMask": "bxxxx11111111", "Umask": "bxxxxx1xx", }, "IIO.SYMBOL_TIMES": { "Box": "IIO", "Category": "IIO Miscellaneous Events", "Defn": "Gen1 - increment once every 4nS, Gen2 - increment once every 2nS, Gen3 - increment once every 1nS", "Desc": "Symbol Times on Link", "EvSel": 130, "ExtSel": "", }, "IIO.TXN_REQ_BY_CPU": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests initiated by the main die, including reads and writes.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Peer R/W subevents are also counted as Mem R/W subevents.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.MSG.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.CMPD.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxx1xx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.MSG.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.MSG.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.CMPD.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxx1xxxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU.MSG.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.CMPD.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxx1xxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.CMPD.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxx1xxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.MSG.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.CMPD.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxx1xxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.MSG.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.CMPD.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxx1xxxxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU.CMPD.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxx1x", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.MSG.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.MSG.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART5": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxx1xxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.MSG.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART6": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxx1xxxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.CMPD.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxx1xxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.IOMMU1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxx1xxxxxxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART7": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxx1xxxxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxx1xxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.CMPD.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxxx1", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU.MSG.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxx1xxxxxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxxx1", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART4": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxx1xxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.CMPD.IOMMU0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxx1xxxxxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxx1xx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64B cache line requests initiated by the Card, including reads and writes.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Unlike the *_BY_CPU.PEER* events, peer R/W subevents do include confined P2P traffic.", "PortMask": "bxxxxxxxxxx1x", "Umask": "bxxxxx1xx", }, # iMC: "iMC.ACT_COUNT": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", }, "iMC.ACT_COUNT.ALL": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "b00001011", }, "iMC.ACT_COUNT.BYP": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. 
One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.CAS_COUNT": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", }, "iMC.CAS_COUNT.RD": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001111", }, "iMC.CAS_COUNT.RD_REG": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.CAS_COUNT.WR": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00110000", }, "iMC.CAS_COUNT.RD_PRE_REG": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.CAS_COUNT.RD_UNDERFILL": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.CAS_COUNT.ALL": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00111111", }, "iMC.CAS_COUNT.RD_PRE_UNDERFILL": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": 
"bxxxx1xxx", }, "iMC.CAS_COUNT.WR_NONPRE": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.CAS_COUNT.WR_PRE": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.CLOCKTICKS": { "Box": "iMC", "Category": "iMC DCLK Events", "Counters": "0-3", "Desc": "DRAM Clockticks", "EvSel": 0, "ExtSel": "", }, "iMC.DRAM_PRE_ALL": { "Box": "iMC", "Category": "iMC DRAM_PRE_ALL Events", "Counters": "0-3", "Defn": "Counts the number of times that the precharge all command was sent.", "Desc": "DRAM Precharge All Commands", "EvSel": 68, "ExtSel": "", }, "iMC.DRAM_REFRESH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 69, "ExtSel": "", }, "iMC.DRAM_REFRESH.PANIC": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.DRAM_REFRESH.HIGH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.DRAM_REFRESH.OPPORTUNISTIC": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PARITY_ERRORS": { "Box": "iMC", "Category": "iMC Error Events", "EvSel": 44, "ExtSel": "", }, "iMC.PCLS": { "Box": "iMC", "Category": "iMC Debug Events", "EvSel": 160, "ExtSel": 
"", }, "iMC.PCLS.WR": { "Box": "iMC", "Category": "iMC Debug Events", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PCLS.RD": { "Box": "iMC", "Category": "iMC Debug Events", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PCLS.TOTAL": { "Box": "iMC", "Category": "iMC Debug Events", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.PMM_CMD1": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands", "EvSel": 234, "ExtSel": "", }, "iMC.PMM_CMD1.RD": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands", "EvSel": 234, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PMM_CMD1.RPQ_GNTS": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands", "EvSel": 234, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.PMM_CMD1.MISC_GNT": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands", "EvSel": 234, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.PMM_CMD1.WR": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands", "EvSel": 234, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.PMM_CMD1.ALL": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands", "EvSel": 234, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PMM_CMD1.MISC": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands", "EvSel": 234, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.PMM_CMD1.WPQ_GNTS": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands", "EvSel": 234, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.PMM_CMD1.UFILL_RD": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands", "EvSel": 234, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.PMM_CMD2": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands - Part 2", "EvSel": 235, "ExtSel": "", }, "iMC.PMM_CMD2.NODATA_EXP": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands - Part 2", "EvSel": 235, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PMM_CMD2.REQS_SLOT0": { 
"Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands - Part 2", "EvSel": 235, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.PMM_CMD2.PMM_ERID_ERROR": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands - Part 2", "EvSel": 235, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.PMM_CMD2.NODATA_UNEXP": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands - Part 2", "EvSel": 235, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.PMM_CMD2.PMM_ECC_ERROR": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands - Part 2", "EvSel": 235, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.PMM_CMD2.REQS_SLOT1": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands - Part 2", "EvSel": 235, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.PMM_CMD2.OPP_RD": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands - Part 2", "EvSel": 235, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PMM_CMD2.PMM_ERID_STARVED": { "Box": "iMC", "Category": "iMC PMM CMD Events", "Desc": "PMM Commands - Part 2", "EvSel": 235, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.PMM_RPQ_CYCLES_FULL": { "Box": "iMC", "Category": "iMC PMM RPQ Events", "Desc": "PMM Read Queue Cycles Full", "EvSel": 226, "ExtSel": "", }, "iMC.PMM_RPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC PMM RPQ Events", "Desc": "PMM Read Queue Cycles Not Empty", "EvSel": 225, "ExtSel": "", }, "iMC.PMM_RPQ_INSERTS": { "Box": "iMC", "Category": "iMC PMM RPQ Events", "Defn": "Counts number of read requests allocated in the PMM Read Pending Queue. 
This includes both ISOCH and non-ISOCH requests.", "Desc": "PMM Read Queue Inserts", "EvSel": 227, "ExtSel": "", }, "iMC.PMM_RPQ_OCCUPANCY": { "Box": "iMC", "Category": "iMC PMM RPQ Events", "Defn": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.", "Desc": "PMM Read Pending Queue Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "Dividing by Inserts provides the average latency entries were resident in the queue. The HA must acquire a credit from the iMC to ensure the request will be accepted and queued. The credit must be received by the HA before sending the request. The read queue entry is deallocated once the data has been transferred from the IXP DIMM", }, "iMC.PMM_RPQ_OCCUPANCY.NO_GNT": { "Box": "iMC", "Category": "iMC PMM RPQ Events", "Defn": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.", "Desc": "PMM Read Pending Queue Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "Dividing by Inserts provides the average latency entries were resident in the queue. The HA must acquire a credit from the iMC to ensure the request will be accepted and queued. The credit must be received by the HA before sending the request. The read queue entry is deallocated once the data has been transferred from the IXP DIMM", "Umask": "bxxxxxx1x", }, "iMC.PMM_RPQ_OCCUPANCY.GNT_WAIT": { "Box": "iMC", "Category": "iMC PMM RPQ Events", "Defn": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.", "Desc": "PMM Read Pending Queue Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "Dividing by Inserts provides the average latency entries were resident in the queue. The HA must acquire a credit from the iMC to ensure the request will be accepted and queued. The credit must be received by the HA before sending the request. 
The read queue entry is deallocated once the data has been transferred from the IXP DIMM", "Umask": "bxxxxx1xx", }, "iMC.PMM_RPQ_OCCUPANCY.ALL": { "Box": "iMC", "Category": "iMC PMM RPQ Events", "Defn": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.", "Desc": "PMM Read Pending Queue Occupancy", "EvSel": 224, "ExtSel": "", "Notes": "Dividing by Inserts provides the average latency entries were resident in the queue. The HA must acquire a credit from the iMC to ensure the request will be accepted and queued. The credit must be received by the HA before sending the request. The read queue entry is deallocated once the data has been transferred from the IXP DIMM", "Umask": "bxxxxxxx1", }, "iMC.PMM_WPQ_CYCLES_FULL": { "Box": "iMC", "Category": "iMC PMM WPQ Events", "Desc": "PMM Write Queue Cycles Full", "EvSel": 230, "ExtSel": "", }, "iMC.PMM_WPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC PMM WPQ Events", "Desc": "PMM Write Queue Cycles Not Empty", "EvSel": 229, "ExtSel": "", }, "iMC.PMM_WPQ_FLUSH": { "Box": "iMC", "Category": "iMC PMM WPQ Events", "EvSel": 232, "ExtSel": "", }, "iMC.PMM_WPQ_FLUSH_CYC": { "Box": "iMC", "Category": "iMC PMM WPQ Events", "EvSel": 233, "ExtSel": "", }, "iMC.PMM_WPQ_INSERTS": { "Box": "iMC", "Category": "iMC PMM WPQ Events", "Defn": "Counts number of write requests allocated in the PMM Write Pending Queue.", "Desc": "PMM Write Queue Inserts", "EvSel": 231, "ExtSel": "", }, "iMC.PMM_WPQ_OCCUPANCY": { "Box": "iMC", "Category": "iMC PMM WPQ Events", "Defn": "Accumulates the per cycle occupancy of the PMM Write Pending Queue.", "Desc": "PMM Write Pending Queue Occupancy", "EvSel": 228, "ExtSel": "", "Notes": "Dividing by Inserts provides the average latency entries were resident in the queue. The HA must acquire a credit from the iMC to ensure the request will be accepted and queued. The credit must be received by the HA before sending the request. 
The write queue entry is deallocated once the iMC has ensured the data was transferred to the IXP DIMM.", }, "iMC.PMM_WPQ_OCCUPANCY.ALL": { "Box": "iMC", "Category": "iMC PMM WPQ Events", "Defn": "Accumulates the per cycle occupancy of the PMM Write Pending Queue.", "Desc": "PMM Write Pending Queue Occupancy", "EvSel": 228, "ExtSel": "", "Notes": "Dividing by Inserts provides the average latency entries were resident in the queue. The HA must acquire a credit from the iMC to ensure the request will be accepted and queued. The credit must be received by the HA before sending the request. The write queue entry is deallocated once the iMC has ensured the data was transferred to the IXP DIMM.", "Umask": "bxxxxxxx1", }, "iMC.PMM_WPQ_OCCUPANCY.CAS": { "Box": "iMC", "Category": "iMC PMM WPQ Events", "Defn": "Accumulates the per cycle occupancy of the PMM Write Pending Queue.", "Desc": "PMM Write Pending Queue Occupancy", "EvSel": 228, "ExtSel": "", "Notes": "Dividing by Inserts provides the average latency entries were resident in the queue. The HA must acquire a credit from the iMC to ensure the request will be accepted and queued. The credit must be received by the HA before sending the request. The write queue entry is deallocated once the iMC has ensured the data was transferred to the IXP DIMM.", "Umask": "bxxxxxx1x", }, "iMC.PMM_WPQ_OCCUPANCY.PWR": { "Box": "iMC", "Category": "iMC PMM WPQ Events", "Defn": "Accumulates the per cycle occupancy of the PMM Write Pending Queue.", "Desc": "PMM Write Pending Queue Occupancy", "EvSel": 228, "ExtSel": "", "Notes": "Dividing by Inserts provides the average latency entries were resident in the queue. The HA must acquire a credit from the iMC to ensure the request will be accepted and queued. The credit must be received by the HA before sending the request. 
The write queue entry is deallocated once the iMC has ensured the data was transferred to the IXP DIMM.", "Umask": "bxxxxx1xx", }, "iMC.POWER_CHANNEL_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.", "Desc": "Channel PPD Cycles", "EvSel": 133, "ExtSel": "", "MaxIncCyc": 4, "Notes": "IBT = Input Buffer Termination = On", }, "iMC.POWER_CKE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 71, "ExtSel": "", "MaxIncCyc": 16, }, "iMC.POWER_CKE_CYCLES.LOW_3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. 
These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 71, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00001000", }, "iMC.POWER_CKE_CYCLES.LOW_2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 71, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000100", }, "iMC.POWER_CKE_CYCLES.LOW_0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 71, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000001", }, "iMC.POWER_CKE_CYCLES.LOW_1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 71, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000010", }, "iMC.POWER_CRIT_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 134, "ExtSel": "", }, "iMC.POWER_CRIT_THROTTLE_CYCLES.SLOT0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. 
If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 134, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.POWER_CRIT_THROTTLE_CYCLES.SLOT1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 134, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.POWER_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.", "Desc": "Clock-Enabled Self-Refresh", "EvSel": 67, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. 
If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 70, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES.SLOT0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.POWER_THROTTLE_CYCLES.SLOT1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. 
If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PRE_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", }, "iMC.PRE_COUNT.PAGE_MISS": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx11xx", }, "iMC.PRE_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.PRE_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.PRE_COUNT.PGT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.PRE_COUNT.ALL": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "b00011100", }, "iMC.RDB_FULL": { "Box": "iMC", "Category": "iMC RDB Events", "Counters": "0-3", "Desc": "Read Data Buffer Full", "EvSel": 25, "ExtSel": "", }, "iMC.RDB_INSERTS": { "Box": "iMC", "Category": "iMC RDB Events", "Counters": "0-3", "Desc": "Read Data Buffer Inserts", "EvSel": 23, "ExtSel": "", }, 
"iMC.RDB_NOT_EMPTY": { "Box": "iMC", "Category": "iMC RDB Events", "Counters": "0-3", "Desc": "Read Data Buffer Not Empty", "EvSel": 24, "ExtSel": "", }, "iMC.RDB_OCCUPANCY": { "Box": "iMC", "Category": "iMC RDB Events", "Counters": "0-3", "Desc": "Read Data Buffer Occupancy", "EvSel": 26, "ExtSel": "", }, "iMC.RPQ_CYCLES_FULL_PCH0": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.", "Desc": "Read Pending Queue Full Cycles", "EvSel": 18, "ExtSel": "", }, "iMC.RPQ_CYCLES_FULL_PCH1": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.", "Desc": "Read Pending Queue Full Cycles", "EvSel": 21, "ExtSel": "", }, "iMC.RPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). 
The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.", "Desc": "Read Pending Queue Not Empty", "EvSel": 17, "ExtSel": "", }, "iMC.RPQ_CYCLES_NE.PCH1": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.", "Desc": "Read Pending Queue Not Empty", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RPQ_CYCLES_NE.PCH0": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. 
They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.", "Desc": "Read Pending Queue Not Empty", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RPQ_INSERTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.", "Desc": "Read Pending Queue Allocations", "EvSel": 16, "ExtSel": "", }, "iMC.RPQ_INSERTS.PCH0": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.", "Desc": "Read Pending Queue Allocations", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RPQ_INSERTS.PCH1": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. 
They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.", "Desc": "Read Pending Queue Allocations", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RPQ_OCCUPANCY_PCH0": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.", "Desc": "Read Pending Queue Occupancy", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 22, }, "iMC.RPQ_OCCUPANCY_PCH1": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. 
They deallocate after the CAS command has been issued to memory.", "Desc": "Read Pending Queue Occupancy", "EvSel": 129, "ExtSel": "", "MaxIncCyc": 22, }, "iMC.SB_ACCESSES": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", }, "iMC.SB_ACCESSES.FM_WR_CMPS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.SB_ACCESSES.REJECTS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", "Umask": "b00001010", }, "iMC.SB_ACCESSES.ACCEPTS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", "Umask": "b00000101", }, "iMC.SB_ACCESSES.NM_RD_CMPS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.SB_ACCESSES.WR_ACCEPTS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.SB_ACCESSES.NM_WR_CMPS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.SB_ACCESSES.RD_REJECTS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.SB_ACCESSES.RD_ACCEPTS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.SB_ACCESSES.FM_RD_CMPS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", 
"Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.SB_ACCESSES.WR_REJECTS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Accesses", "EvSel": 210, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.SB_CANARY": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 217, "ExtSel": "", }, "iMC.SB_CANARY.ALLOC": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 217, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.SB_CANARY.FM_RD_STARVED": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 217, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.SB_CANARY.NM_WR_STARVED": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 217, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.SB_CANARY.FM_TGR_WR_STARVED": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 217, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.SB_CANARY.VLD": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 217, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.SB_CANARY.NM_RD_STARVED": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 217, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.SB_CANARY.FM_WR_STARVED": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 217, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.SB_CANARY.DEALLOC": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 217, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.SB_CYCLES_FULL": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Cycles Full", "EvSel": 209, "ExtSel": "", }, "iMC.SB_CYCLES_NE": { "Box": "iMC", "Category": "iMC PMM MEMMODE 
SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Cycles Not-Empty", "EvSel": 208, "ExtSel": "", }, "iMC.SB_INSERTS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Inserts", "EvSel": 214, "ExtSel": "", }, "iMC.SB_INSERTS.RDS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Inserts", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.SB_INSERTS.PMM_RDS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Inserts", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.SB_INSERTS.WRS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Inserts", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.SB_INSERTS.PMM_WRS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Inserts", "EvSel": 214, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.SB_INSERTS.BLOCK_WRS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Inserts", "EvSel": 214, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.SB_INSERTS.BLOCK_RDS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Inserts", "EvSel": 214, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.SB_OCCUPANCY": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Occupancy", "EvSel": 213, "ExtSel": "", "MaxIncCyc": 128, }, "iMC.SB_OCCUPANCY.PMM_WRS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Occupancy", "EvSel": 213, "ExtSel": "", "MaxIncCyc": 128, "Umask": "bxxxx1xxx", }, "iMC.SB_OCCUPANCY.BLOCK_RDS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Occupancy", "EvSel": 213, "ExtSel": "", 
"MaxIncCyc": 128, "Umask": "bxx1xxxxx", }, "iMC.SB_OCCUPANCY.BLOCK_WRS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Occupancy", "EvSel": 213, "ExtSel": "", "MaxIncCyc": 128, "Umask": "bx1xxxxxx", }, "iMC.SB_OCCUPANCY.PMM_RDS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Occupancy", "EvSel": 213, "ExtSel": "", "MaxIncCyc": 128, "Umask": "bxxxxx1xx", }, "iMC.SB_OCCUPANCY.RDS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Occupancy", "EvSel": 213, "ExtSel": "", "MaxIncCyc": 128, "Umask": "bxxxxxxx1", }, "iMC.SB_PREF_INSERTS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Prefetch Inserts", "EvSel": 218, "ExtSel": "", }, "iMC.SB_PREF_INSERTS.PMM": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Prefetch Inserts", "EvSel": 218, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.SB_PREF_INSERTS.DDR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Prefetch Inserts", "EvSel": 218, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.SB_PREF_INSERTS.ALL": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Prefetch Inserts", "EvSel": 218, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.SB_PREF_OCCUPANCY": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Prefetch Occupancy", "EvSel": 219, "ExtSel": "", }, "iMC.SB_PREF_OCCUPANCY.DDR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Prefetch Occupancy", "EvSel": 219, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.SB_PREF_OCCUPANCY.PMM": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard 
Prefetch Occupancy", "EvSel": 219, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.SB_PREF_OCCUPANCY.ALL": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "Desc": "Scoreboard Prefetch Occupancy", "EvSel": 219, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.SB_REJECT": { "Box": "iMC", "Category": "iMC PMM MEMMODE COHERENCY Events", "Counters": "0-3", "Desc": "Number of Scoreboard Requests Rejected", "EvSel": 212, "ExtSel": "", }, "iMC.SB_REJECT.PATROL_SET_CNFLT": { "Box": "iMC", "Category": "iMC PMM MEMMODE COHERENCY Events", "Counters": "0-3", "Desc": "Number of Scoreboard Requests Rejected", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.SB_REJECT.NM_SET_CNFLT": { "Box": "iMC", "Category": "iMC PMM MEMMODE COHERENCY Events", "Counters": "0-3", "Desc": "Number of Scoreboard Requests Rejected", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.SB_REJECT.FM_ADDR_CNFLT": { "Box": "iMC", "Category": "iMC PMM MEMMODE COHERENCY Events", "Counters": "0-3", "Desc": "Number of Scoreboard Requests Rejected", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.SB_REJECT.CANARY": { "Box": "iMC", "Category": "iMC PMM MEMMODE COHERENCY Events", "Counters": "0-3", "Desc": "Number of Scoreboard Requests Rejected", "EvSel": 212, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.SB_REJECT.DDR_EARLY_CMP": { "Box": "iMC", "Category": "iMC PMM MEMMODE COHERENCY Events", "Counters": "0-3", "Desc": "Number of Scoreboard Requests Rejected", "EvSel": 212, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.SB_STRV_ALLOC": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 215, "ExtSel": "", }, "iMC.SB_STRV_ALLOC.FM_RD": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 215, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.SB_STRV_ALLOC.NM_RD": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 215, "ExtSel": "", "Umask": 
"bxxxxxxx1", }, "iMC.SB_STRV_ALLOC.FM_TGR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 215, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.SB_STRV_ALLOC.FM_WR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 215, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.SB_STRV_ALLOC.NM_WR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 215, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.SB_STRV_DEALLOC": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 222, "ExtSel": "", }, "iMC.SB_STRV_DEALLOC.FM_TGR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 222, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.SB_STRV_DEALLOC.NM_RD": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 222, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.SB_STRV_DEALLOC.FM_RD": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 222, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.SB_STRV_DEALLOC.NM_WR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 222, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.SB_STRV_DEALLOC.FM_WR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 222, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.SB_STRV_OCC": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 216, "ExtSel": "", "MaxIncCyc": 63, }, "iMC.SB_STRV_OCC.FM_TGR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 216, "ExtSel": "", "MaxIncCyc": 63, "Umask": "bxxx1xxxx", }, "iMC.SB_STRV_OCC.NM_RD": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 216, "ExtSel": "", "MaxIncCyc": 63, "Umask": "bxxxxxxx1", }, 
"iMC.SB_STRV_OCC.FM_RD": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 216, "ExtSel": "", "MaxIncCyc": 63, "Umask": "bxxxxxx1x", }, "iMC.SB_STRV_OCC.NM_WR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 216, "ExtSel": "", "MaxIncCyc": 63, "Umask": "bxxxxx1xx", }, "iMC.SB_STRV_OCC.FM_WR": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "Counters": "0-3", "EvSel": 216, "ExtSel": "", "MaxIncCyc": 63, "Umask": "bxxxx1xxx", }, "iMC.SB_TAGGED": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "EvSel": 221, "ExtSel": "", }, "iMC.SB_TAGGED.RD_MISS": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "EvSel": 221, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.SB_TAGGED.OCC": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "EvSel": 221, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.SB_TAGGED.RD_HIT": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "EvSel": 221, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.SB_TAGGED.DDR4_CMP": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "EvSel": 221, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.SB_TAGGED.PMM0_CMP": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "EvSel": 221, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.SB_TAGGED.PMM1_CMP": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "EvSel": 221, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.SB_TAGGED.NEW": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "EvSel": 221, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.SB_TAGGED.PMM2_CMP": { "Box": "iMC", "Category": "iMC PMM MEMMODE SCOREBOARD Events", "EvSel": 221, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.TAGCHK": { "Box": "iMC", "Category": "iMC TAG CHECK Events", "Counters": "0-3", "Desc": "2LM Tag Check", "EvSel": 211, "ExtSel": "", }, "iMC.TAGCHK.NM_WR_HIT": { "Box": "iMC", "Category": "iMC TAG CHECK 
Events", "Counters": "0-3", "Desc": "2LM Tag Check", "EvSel": 211, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.TAGCHK.MISS_CLEAN": { "Box": "iMC", "Category": "iMC TAG CHECK Events", "Counters": "0-3", "Desc": "2LM Tag Check", "EvSel": 211, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.TAGCHK.MISS_DIRTY": { "Box": "iMC", "Category": "iMC TAG CHECK Events", "Counters": "0-3", "Desc": "2LM Tag Check", "EvSel": 211, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.TAGCHK.HIT": { "Box": "iMC", "Category": "iMC TAG CHECK Events", "Counters": "0-3", "Desc": "2LM Tag Check", "EvSel": 211, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.TAGCHK.NM_RD_HIT": { "Box": "iMC", "Category": "iMC TAG CHECK Events", "Counters": "0-3", "Desc": "2LM Tag Check", "EvSel": 211, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.WPQ_CYCLES_FULL_PCH0": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.", "Desc": "Write Pending Queue Full Cycles", "EvSel": 34, "ExtSel": "", }, "iMC.WPQ_CYCLES_FULL_PCH1": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.", "Desc": "Write Pending Queue Full Cycles", "EvSel": 22, "ExtSel": "", }, "iMC.WPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Write Pending Queue is not empty. 
This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.", "Desc": "Write Pending Queue Not Empty", "EvSel": 33, "ExtSel": "", }, "iMC.WPQ_CYCLES_NE.PCH0": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.", "Desc": "Write Pending Queue Not Empty", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WPQ_CYCLES_NE.PCH1": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Write Pending Queue is not empty. 
This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.", "Desc": "Write Pending Queue Not Empty", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WPQ_INSERTS": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC.", "Desc": "Write Pending Queue Allocations", "EvSel": 32, "ExtSel": "", }, "iMC.WPQ_INSERTS.PCH1": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. 
Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC.", "Desc": "Write Pending Queue Allocations", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WPQ_INSERTS.PCH0": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC.", "Desc": "Write Pending Queue Allocations", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WPQ_OCCUPANCY_PCH0": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. 
Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the \"not posted\" filter, we can track how long writes spent in the iMC before completions were sent to the HA. The \"posted\" filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.", "Desc": "Write Pending Queue Occupancy", "EvSel": 130, "ExtSel": "", "MaxIncCyc": 40, }, "iMC.WPQ_OCCUPANCY_PCH1": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. 
By using the \"not posted\" filter, we can track how long writes spent in the iMC before completions were sent to the HA. The \"posted\" filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.", "Desc": "Write Pending Queue Occupancy", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 40, }, "iMC.WPQ_READ_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 35, "ExtSel": "", }, "iMC.WPQ_READ_HIT.PCH0": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. 
Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WPQ_READ_HIT.PCH1": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WPQ_WRITE_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 36, "ExtSel": "", }, "iMC.WPQ_WRITE_HIT.PCH1": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. 
When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 36, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WPQ_WRITE_HIT.PCH0": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 36, "ExtSel": "", "Umask": "bxxxxxxx1", }, # PCU: "PCU.CLOCKTICKS": { "Box": "PCU", "Category": "PCU PCLK Events", "Counters": "0-3", "Defn": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. 
The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.", "Desc": "Clockticks of the power control unit (PCU)", "EvSel": 0, "ExtSel": "", }, "PCU.CORE_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "EvSel": 96, "ExtSel": "", }, "PCU.DEMOTIONS": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "EvSel": 48, "ExtSel": "", }, "PCU.FIVR_PS_PS0_CYCLES": { "Box": "PCU", "Category": "PCU FIVR Events", "Counters": "0-3", "Defn": "Cycles spent in phase-shedding power state 0", "Desc": "Phase Shed 0 Cycles", "EvSel": 117, "ExtSel": "", }, "PCU.FIVR_PS_PS1_CYCLES": { "Box": "PCU", "Category": "PCU FIVR Events", "Counters": "0-3", "Defn": "Cycles spent in phase-shedding power state 1", "Desc": "Phase Shed 1 Cycles", "EvSel": 118, "ExtSel": "", }, "PCU.FIVR_PS_PS2_CYCLES": { "Box": "PCU", "Category": "PCU FIVR Events", "Counters": "0-3", "Defn": "Cycles spent in phase-shedding power state 2", "Desc": "Phase Shed 2 Cycles", "EvSel": 119, "ExtSel": "", }, "PCU.FIVR_PS_PS3_CYCLES": { "Box": "PCU", "Category": "PCU FIVR Events", "Counters": "0-3", "Defn": "Cycles spent in phase-shedding power state 3", "Desc": "Phase Shed 3 Cycles", "EvSel": 120, "ExtSel": "", }, "PCU.FREQ_CLIP_AVX256": { "Box": "PCU", "Category": "PCU Frequency Clipping Events", "Desc": "AVX256 Frequency Clipping", "EvSel": 73, "ExtSel": "", }, "PCU.FREQ_CLIP_AVX512": { "Box": "PCU", "Category": "PCU Frequency Clipping Events", "Desc": "AVX512 Frequency Clipping", "EvSel": 74, "ExtSel": "", }, "PCU.FREQ_MAX_LIMIT_THERMAL_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Number of cycles any frequency is reduced due to a thermal limit. 
Count only if throttling is occurring.", "Desc": "Thermal Strongest Upper Limit Cycles", "EvSel": 4, "ExtSel": "", }, "PCU.FREQ_MAX_POWER_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when power is the upper limit on frequency.", "Desc": "Power Strongest Upper Limit Cycles", "EvSel": 5, "ExtSel": "", }, "PCU.FREQ_MIN_IO_P_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MIN_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.", "Desc": "IO P Limit Strongest Lower Limit Cycles", "EvSel": 115, "ExtSel": "", }, "PCU.FREQ_TRANS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.", "Desc": "Cycles spent changing Frequency", "EvSel": 116, "ExtSel": "", }, "PCU.MEMORY_PHASE_SHEDDING_CYCLES": { "Box": "PCU", "Category": "PCU MEMORY_PHASE_SHEDDING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.", "Desc": "Memory Phase Shedding Cycles", "EvSel": 47, "ExtSel": "", "Notes": "Package C1", }, "PCU.PKG_RESIDENCY_C0_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C0. 
This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C0", "EvSel": 42, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C2E_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C2E", "EvSel": 43, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C3_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C3", "EvSel": 44, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C6_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C6", "EvSel": 45, "ExtSel": "", }, "PCU.PMAX_THROTTLED_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "EvSel": 6, "ExtSel": "", }, "PCU.POWER_STATE_OCCUPANCY": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, }, "PCU.POWER_STATE_OCCUPANCY.CORES_C3": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "Umask": "b10000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C6": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "Umask": "b11000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C0": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "Umask": "b01000000", }, "PCU.PROCHOT_EXTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.", "Desc": "External Prochot", "EvSel": 10, "ExtSel": "", }, "PCU.PROCHOT_INTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in Interal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.", "Desc": "Internal Prochot", "EvSel": 9, "ExtSel": "", }, "PCU.TOTAL_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions across all cores.", "Desc": "Total Core C State Transition Cycles", "EvSel": 114, "ExtSel": "", }, "PCU.VR_HOT_CYCLES": { "Box": "PCU", "Category": "PCU VR_HOT Events", "Counters": "0-3", "Defn": "Number of cycles that a CPU SVID VR is hot. 
Does not cover DRAM VRs", "Desc": "VR Hot", "EvSel": 66, "ExtSel": "", }, } derived = { # CHA: "CHA.AVG_CRD_MISS_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of Code Reads from an iA Core that miss the LLC", "Desc": "Average Code Read Latency", "Equation": "(TOR_OCCUPANCY.IA_MISS_CRD + TOR_OCCUPANCY.IA_MISS_CRD_PREF) / (TOR_INSERTS.IA_MISS_CRD + TOR_INSERTS.IA_MISS_CRD_PREF)", }, "CHA.AVG_DEMAND_RD_HIT_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of Data Reads that hit the LLC", "Desc": "Average Data Read Hit Latency", "Equation": "TOR_OCCUPANCY.IA_HIT_DRD / TOR_INSERTS.IA_HIT_DRD", }, "CHA.AVG_DEMAND_RD_MISS_LOCAL_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of Data Reads from an IA Core that miss the LLC and were satsified by Local Memory", "Desc": "Average Data Read Local Miss Latency", "Equation": "TOR_OCCUPANCY.IA_MISS_DRD_LOCAL / TOR_INSERTS.IA_MISS_DRD_LOCAL", }, "CHA.AVG_DEMAND_RD_MISS_REMOTE_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of Data Reads from an iA Core that miss the LLC and were satsified by a Remote cache or Remote Memory", "Desc": "Average Data Read Remote Miss Latency", "Equation": "TOR_OCCUPANCY.IA_MISS_DRD_REMOTE / TOR_INSERTS.IA_MISS_DRD_REMOTE", }, "CHA.AVG_DRD_MISS_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of Data Reads or Data Read Prefetches from an IA Core that miss the LLC", "Desc": "Average Data Read Miss Latency", "Equation": "(TOR_OCCUPANCY.IA_MISS_DRD + TOR_OCCUPANCY.IA_MISS_DRD_PREF) / (TOR_INSERTS.IA_MISS_DRD + TOR_INSERTS.IA_MISS_DRD_PREF)", }, "CHA.AVG_IA_CRD_LLC_HIT_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of Code Reads from an iA Core that miss the LLC", "Desc": "Average Code Read Latency", "Equation": "TOR_OCCUPANCY.IA_HIT_CRD / TOR_INSERTS.IA_HIT_CRD", }, "CHA.AVG_INGRESS_DEPTH": { "Box": "CHA", 
"Category": "CHA INGRESS Events", "Defn": "Average Depth of the Ingress Queue through the sample interval", "Desc": "Average Ingress (from CMS) Depth", "Equation": "RxC_OCCUPANCY.IRQ / SAMPLE_INTERVAL", }, "CHA.AVG_INGRESS_LATENCY": { "Box": "CHA", "Category": "CHA INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks", "Desc": "Average Ingress (from CMS) Latency", "Equation": "RxC_OCCUPANCY.IRQ / RxC_INSERTS.IRQ", }, "CHA.AVG_INGRESS_LATENCY_WHEN_NE": { "Box": "CHA", "Category": "CHA INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks when Ingress Queue has at least one entry", "Desc": "Average Latency in Non-Empty Ingress (from CMS)", "Equation": "RxC_OCCUPANCY.IRQ / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}", }, "CHA.AVG_RFO_MISS_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of RFOs from an iA Core that miss the LLC", "Desc": "Average RFO Latency", "Equation": "(TOR_OCCUPANCY.IA_MISS_RFO + TOR_OCCUPANCY.IA_MISS_RFO_PREF) / (TOR_INSERTS.IA_MISS_RFO + TOR_INSERTS.IA_MISS_RFO_PREF)", }, "CHA.AVG_TOR_DRDS_MISS_WHEN_NE": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Number of Data Read Entries that Miss the LLC when the TOR is not empty.", "Desc": "Average Data Read Misses in Non-Empty TOR", "Equation": "TOR_OCCUPANCY.IA_MISS_DRD / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}", }, "CHA.AVG_TOR_DRDS_WHEN_NE": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Number of Data Read Entries when the TOR is not empty.", "Desc": "Average Data Reads in Non-Empty TOR", "Equation": "TOR_OCCUPANCY.IA_DRD / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}", }, "CHA.CYC_INGRESS_BLOCKED": { "Box": "CHA", "Category": "CHA INGRESS Events", "Defn": "Cycles the Ingress Request Queue arbiter was Blocked", "Desc": "Cycles Ingress (from CMS) Blocked", "Equation": "RxC_EXT_STARVED.IRQ / SAMPLE_INTERVAL", }, "CHA.FAST_STR_LLC_HIT": { "Box": "CHA", 
"Category": "CHA TOR Events", "Defn": "Number of ItoM (fast string) operations that reference the LLC", "Desc": "Fast String operations", "Equation": "TOR_INSERTS.IA_HIT_ITOM", }, "CHA.FAST_STR_LLC_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of ItoM (fast string) operations that miss the LLC", "Desc": "Fast String misses", "Equation": "TOR_INSERTS.IA_MISS_ITOM", }, "CHA.INGRESS_REJ_V_INS": { "Box": "CHA", "Category": "CHA INGRESS Events", "Defn": "Ratio of Ingress Request Entries that were rejected vs. inserted", "Desc": "Ingress (from CMS) Rejects vs. Inserts", "Equation": "RxC_INSERTS.IRQ_REJECTED / RxC_INSERTS.IRQ", }, "CHA.LLC_CRD_MISS_TO_LOC_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC Code Read and Code Prefetch misses satisfied by local memory.", "Desc": "LLC Code Read Misses to Local Memory", "Equation": "TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL + TOR_INSERTS.IA_MISS_CRD_LOCAL", }, "CHA.LLC_CRD_MISS_TO_REM_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC Code Read and Code Read Prefetch misses satisfied by a remote cache or remote memory.", "Desc": "LLC Code Read Misses to Remote Memory", "Equation": "TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE + TOR_INSERTS.IA_MISS_CRD_REMOTE", }, "CHA.LLC_DRD_MISS_PCT": { "Box": "CHA", "Category": "CHA CACHE Events", "Desc": "LLC DRd Miss Percentage", "Equation": "LLC_LOOKUP.DATA_READ_MISS / LLC_LOOKUP.DATA_READ_ALL", }, "CHA.LLC_DRD_MISS_TO_LOC_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC Data Read and Data Prefetch misses satisfied by local memory.", "Desc": "LLC Data Read Misses to Local Memory", "Equation": "TOR_INSERTS.IA_MISS_DRD_LOCAL", }, "CHA.LLC_DRD_MISS_TO_REM_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC Data Read and Data Prefetch misses satisfied by a remote cache or remote memory.", "Desc": "LLC Data Read Misses to Remote Memory", "Equation": "TOR_INSERTS.IA_MISS_DRD_REMOTE", }, "CHA.LLC_DRD_PREFETCH_HITS": { "Box": "CHA", 
"Category": "CHA TOR Events", "Desc": "DRd Prefetches that Hit the LLC", "Equation": "TOR_INSERTS.IA_HIT_DRD_PREF", }, "CHA.LLC_DRD_PREFETCH_MISSES": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "DRd Prefetches that Missed the LLC", "Equation": "TOR_INSERTS.IA_MISS_DRD_PREF", }, "CHA.LLC_IA_CRD_HITS": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "LLC Code Read Misses to Local Memory", "Equation": "TOR_INSERTS.IA_HIT_CRD", }, "CHA.LLC_MPI": { "Box": "CHA", "Category": "CHA CACHE Events", "Defn": "LLC Misses Per Instruction (code, read, RFO and prefetches)", "Desc": "LLC MPI", "Equation": "LLC_LOOKUP.MISS_ALL / INST_RETIRED.ALL (on Core)", }, "CHA.LLC_PCIE_DATA_BYTES": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC write miss (disk/network reads) bandwidth in MB", "Desc": "LLC Miss Data from PCIe", "Equation": "TOR_INSERTS.IO_ITOM * 64", }, "CHA.LLC_RFO_MISS_PCT": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC RFO Miss Ratio", "Desc": "LLC RFO Miss Ratio", "Equation": "TOR_INSERTS.IA_MISS_RFO / TOR_INSERTS.IA_RFO", }, "CHA.LLC_RFO_MISS_TO_LOC_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC RFO and RFO Prefetch misses satisfied by local memory.", "Desc": "LLC RFO Misses to Local Memory", "Equation": "TOR_INSERTS.IA_MISS_RFO_LOCAL", }, "CHA.LLC_RFO_MISS_TO_REM_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC RFO and RFO Prefetch misses satisfied by a remote cache or remote memory.", "Desc": "LLC RFO Misses to Remote Memory", "Equation": "TOR_INSERTS.IA_MISS_RFO_REMOTE", }, "CHA.LLC_RFO_PREFETCH_HITS": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "RFO Prefetches that Hit the LLC", "Equation": "TOR_INSERTS.IA_HIT_RFO_PREF", }, "CHA.LLC_RFO_PREFETCH_MISSES": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "RFO Prefetches that Missed the LLC", "Equation": "TOR_INSERTS.IA_MISS_RFO_PREF", }, "CHA.MEM_WB_BYTES": { "Box": "CHA", "Category": "CHA CACHE Events", "Defn": "Data written back 
to memory in Number of Bytes", "Equation": "LLC_VICTIMS.M_STATE * 64", }, "CHA.MMIO_READ_BW": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "IO Read Bandwidth in MB - Disk or Network Reads", "Desc": "IO Read Bandwidth", "Equation": "TOR_INSERTS.IA_MISS_UCRDF * 64 / 1000000", }, "CHA.MMIO_WRITE_BW": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "IO Write Bandwidth in MB - Disk or Network Writes", "Desc": "IO Write Bandwidth", "Equation": "TOR_INSERTS.IA_MISS_WIL* 64 / 1000000", }, "CHA.PCIE_FULL_WRITES": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of full PCI writes", "Desc": "PCIe Data Traffic", "Equation": "TOR_INSERTS.IO_ITOM", }, "CHA.PCI_PARTIAL_WRITES": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of partial PCI writes", "Desc": "Partial PCI Writes", "Equation": "TOR_INSERTS.IO_RFO", }, "CHA.PCI_READS": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of PCI reads", "Desc": "Partial PCI Reads", "Equation": "TOR_INSERTS.IO_PCIRDCUR", }, "CHA.PCT_RD_REQUESTS": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Percentage of HA traffic that is from Read Requests", "Desc": "Percent Read Requests", "Equation": "REQUESTS.READS / (REQUESTS.READS + REQUESTS.WRITES)", }, "CHA.PCT_WR_REQUESTS": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Percentage of HA traffic that is from Write Requests", "Desc": "Percent Write Requests", "Equation": "REQUESTS.WRITES / (REQUESTS.READS + REQUESTS.WRITES)", }, "CHA.STREAMED_FULL_STORES": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Full Line)", "Equation": "TOR_INSERTS.IA_WCILF", }, "CHA.STREAMED_FULL_STORES.MISS_LOCAL_TO_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Full Line), targeting DDR, that Miss the LLC - Locally HOMed", "Equation": "TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR", }, "CHA.STREAMED_FULL_STORES.MISS_LOCAL_TO_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": 
"Streaming Stores (Full Line), targeting PMM, that Miss the LLC - Locally HOMed", "Equation": "TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM", }, "CHA.STREAMED_FULL_STORES.MISS_REMOTE_TO_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Full Line), targeting DDR, that Miss the LLC - Remotely HOMed", "Equation": "TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR", }, "CHA.STREAMED_FULL_STORES.MISS_REMOTE_TO_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Full Line), targeting PMM, that Miss the LLC - Remotely HOMed", "Equation": "TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM", }, "CHA.STREAMED_FULL_STORES.MISS_TO_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Full Line), targeting DDR, that Miss the LLC", "Equation": "TOR_INSERTS.IA_MISS_WCILF_DDR", }, "CHA.STREAMED_FULL_STORES.MISS_TO_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Full Line), targeting PMM, that Miss the LLC", "Equation": "TOR_INSERTS.IA_MISS_WCILF_PMM", }, "CHA.STREAMED_PART_STORES": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Partial Line)", "Equation": "TOR_INSERTS.IA_WCIL", }, "CHA.STREAMED_PART_STORES.MISS_LOCAL_TO_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Partial Line), targeting DDR, that Miss the LLC - Locally HOMed", "Equation": "TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR", }, "CHA.STREAMED_PART_STORES.MISS_LOCAL_TO_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Partial Line), targeting PMM, that Miss the LLC - Locally HOMed", "Equation": "TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM", }, "CHA.STREAMED_PART_STORES.MISS_REMOTE_TO_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Partial Line), targeting DDR, that Miss the LLC - Remotely HOMed", "Equation": "TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR", }, "CHA.STREAMED_PART_STORES.MISS_REMOTE_TO_PMM": { "Box": "CHA", "Category": "CHA TOR Events", 
"Desc": "Streaming Stores (Partial Line), targeting PMM, that Miss the LLC - Remotely HOMed", "Equation": "TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM", }, "CHA.STREAMED_PART_STORES.MISS_TO_DDR": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Partial Line), targeting DDR, that Miss the LLC", "Equation": "TOR_INSERTS.IA_MISS_WCIL_DDR", }, "CHA.STREAMED_PART_STORES.MISS_TO_PMM": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "Streaming Stores (Partial Line), targeting PMM, that Miss the LLC", "Equation": "TOR_INSERTS.IA_MISS_WCIL_PMM", }, # UPI_LL: "UPI_LL.DRS_E_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Defn": "DRS response in F or E states received from UPI in bytes. To calculate the total data response for each cache line state, it's necessary to add the contribution from three flavors {DataC, DataC_FrcAckCnflt, DataC_Cmp} of data response packets for each cache line state.", "Desc": "DRS Data in F or E From UPI", "Equation": "RxL_BASIC_HDR_MATCH.{umask,opc}={0x1C,1} * 64", }, "UPI_LL.DRS_M_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Defn": "Data Response DataM packets received from UPI. Expressed in bytes", "Desc": "DRS Data_Ms From UPI", "Equation": "RxL_BASIC_HDR_MATCH.{umask,opc}={0x0C,1} * 64", }, "UPI_LL.DRS_WB_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Defn": "DRS writeback packets received from UPI in bytes. 
This is the sum of Wb{I,S,E} DRS packets", "Desc": "DRS Writeback From UPI", "Equation": "DRS_WbI_FROM_UPI + DRS_WbS_FROM_UPI + DRS_WbE_FROM_UPI", }, "UPI_LL.DRS_WbE_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Defn": "DRS writeback 'change M to E state' packets received from UPI in bytes", "Desc": "DRS WbE From UPI", "Equation": "RxL_BASIC_HDR_MATCH.{umask,opc}={0x2D,1} *64", }, "UPI_LL.DRS_WbI_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Defn": "DRS writeback 'change M to I state' packets received from UPI in bytes", "Desc": "DRS WbI From UPI", "Equation": "RxL_BASIC_HDR_MATCH.{umask,opc}={0x0D,1} *64", }, "UPI_LL.DRS_WbS_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Defn": "DRS writeback 'change M to S state' packets received from UPI in bytes", "Desc": "DRS WbSFrom UPI", "Equation": "RxL_BASIC_HDR_MATCH.{umask,opc}={0x1D,1} *64", }, "UPI_LL.NCB_DATA_FROM_UPI_TO_NODEx": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Defn": "NCB Data packets (Any - Interrupts) received from UPI sent to Node ID 'x'. 
Expressed in bytes", "Desc": "NCB Data From UPI To Node x", "Equation": "RxL_BASIC_HDR_MATCH.{umask,endnid,dnid} = {0xE,1,x} * 64", }, "UPI_LL.PCT_LINK_CRC_RETRY_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL CRC_ERRORS_RX Events", "Defn": "Percent of Cycles the UPI link layer is in retry mode due to CRC errors", "Desc": "Percent Link CRC Retry Cycles", "Equation": "RxL_CRC_CYCLES_IN_LLR / CLOCKTICKS", }, "UPI_LL.PCT_LINK_FULL_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_RX Events", "Defn": "Percent of Cycles the UPI link is at Full Power", "Desc": "Percent Link Full Power Cycles", "Equation": "RxL0_POWER_CYCLES / CLOCKTICKS", }, "UPI_LL.PCT_LINK_HALF_DISABLED_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_RX Events", "Defn": "Percent of Cycles the UPI link in power mode where half of the lanes are disabled.", "Desc": "Percent Link Half Disabled Cycles", "Equation": "RxL0P_POWER_CYCLES / CLOCKTICKS", }, "UPI_LL.PCT_LINK_SHUTDOWN_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER Events", "Defn": "Percent of Cycles the UPI link is Shutdown", "Desc": "Percent Link Shutdown Cycles", "Equation": "L1_POWER_CYCLES / CLOCKTICKS", }, "UPI_LL.UPI_SPEED": { "Box": "UPI_LL", "Category": "UPI_LL CFCLK Events", "Defn": "UPI Speed - In GT/s (GigaTransfers / Second) - Max UPI Bandwidth is 2 * ROUND ( UPI Speed , 0)", "Desc": "UPI Speed", "Equation": "ROUND (( CLOCKTICKS / TSC ) * TSC_SPEED, 0 ) * ( 8 / 1000)", }, # PCU: "PCU.PCT_CYC_FREQ_POWER_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by power", "Desc": "Percent Frequency Power Limited", "Equation": "FREQ_MAX_POWER_CYCLES / CLOCKTICKS", }, # iMC: "iMC.MEM_BW_READS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by reads. 
Expressed in bytes.", "Desc": "Read Memory Bandwidth", "Equation": "(CAS_COUNT.RD * 64)", }, "iMC.MEM_BW_TOTAL": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Total memory bandwidth. Expressed in bytes.", "Desc": "Total Memory Bandwidth", "Equation": "MEM_BW_READS + MEM_BW_WRITES", }, "iMC.MEM_BW_WRITES": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by writes Expressed in bytes.", "Desc": "Write Memory Bandwidth", "Equation": "(CAS_COUNT.WR * 64)", }, "iMC.PCT_CYCLES_CRITICAL_THROTTLE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in critical thermal throttling", "Desc": "Percent Cycles Critical Throttle", "Equation": "POWER_CRITICAL_THROTTLE_CYCLES / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_THR": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in thermal throttling.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_THROTTLE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in PPD mode", "Desc": "Percent Cycles PPD", "Equation": "POWER_CHANNEL_PPD / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles Memory is in self refresh power mode", "Desc": "Percent Cycles Self Refresh", "Equation": "POWER_SELF_REFRESH / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_RD_REQUESTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Defn": "Percentage of read requests from total requests.", "Desc": "Percent Read Requests", "Equation": "RPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)", }, "iMC.PCT_REQUESTS_PAGE_EMPTY": { "Box": "iMC", "Category": "iMC ACT Events", "Defn": "Percentage of memory requests that resulted in Page Empty", "Desc": "Percent Requests Page Empty", "Equation": "PRE_COUNT.PGT - 
CAS_COUNT.ALL", }, "iMC.PCT_REQUESTS_PAGE_HIT": { "Box": "iMC", "Category": "iMC ACT Events", "Defn": "Percentage of memory requests that resulted in Page Misses - Precharge followed by Activate", "Desc": "Percent Requests Page Miss", "Equation": "(PRE_COUNT.RD + PRE_COUNT.WR) / CAS_COUNT.ALL", }, "iMC.PCT_WR_REQUESTS": { "Box": "iMC", "Category": "iMC WPQ Events", "Defn": "Percentage of write requests from total requests.", "Desc": "Percent Write Requests", "Equation": "WPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)", }, } categories = ( "CHA CACHE Events", "CHA CBO SNOOP RESPONSE Events", "CHA CMS Horizontal EGRESS Events", "CHA CMS Transgress Credit Events", "CHA CMS Transgress INGRESS Events", "CHA CMS Vertical EGRESS Events", "CHA DIRECT GO Events", "CHA External Misc Events (eg. From MS2IDI)", "CHA HA BYPASS Events", "CHA HA DIRECTORY Events", "CHA HA HitME Events", "CHA HA HitME Pipe Events", "CHA HA OSB Events", "CHA HA PM MEMMODE Events", "CHA HA PMM QOS Events", "CHA HA REQUEST Events", "CHA HA SNOOP RESPONSE Events", "CHA HA WBPUSHMTOI Events", "CHA Horizontal RING Events", "CHA Horizontal In Use RING Events", "CHA INGRESS Events", "CHA INGRESS_RETRY Events", "CHA ISMQ Events", "CHA MC Credit and Traffic Events", "CHA MISC Events", "CHA Misc Events", "CHA OCCUPANCY Events", "CHA PIPE REJECT Events", "CHA TOR Events", "CHA UCLK Events", "CHA Vertical In Use RING Events", "CHA Vertical RING Events", "CHA XPT Events", "IIO CLOCK Events", "IIO Debug Events", "IIO IOMMU Events", "IIO ITC Events", "IIO Miscellaneous Events", "IIO OTC Events", "IIO PCIe Completion Buffer Events", "IIO Payload Events", "IIO Transaction Events", "IRP AK Egress Events", "IRP BL Egress Events", "IRP CLOCK Events", "IRP Coherency Events", "IRP FAF Events", "IRP IRP Buffer Events", "IRP MISC Events", "IRP OUTBOUND_REQUESTS Events", "IRP P2P Events", "IRP STALL_CYCLES Events", "IRP TRANSACTIONS Events", "IRP WRITE_CACHE Events", "M2M AD CMS/Mesh Egress Credit Events", "M2M AD Egress 
Events", "M2M AD Ingress Events", "M2M AK CMS/Mesh Egress Credit Events", "M2M AK Egress Events", "M2M BL CMS/Mesh Egress Credit Events", "M2M BL Egress Events", "M2M BL Ingress Events", "M2M CMS Horizontal EGRESS Events", "M2M CMS Transgress Credit Events", "M2M CMS Transgress INGRESS Events", "M2M CMS Vertical EGRESS Events", "M2M DIRECT2CORE Events", "M2M DIRECT2UPI Events", "M2M DIRECTORY Events", "M2M Directory State Events", "M2M Distress Events", "M2M External Misc Events (eg. From MS2IDI)", "M2M Horizontal RING Events", "M2M Horizontal In Use RING Events", "M2M IMC Events", "M2M Mirror WriteQ EVENTS", "M2M Misc Events", "M2M OUTBOUND_TX Events", "M2M PACKET MATCH Events", "M2M Prefetch CAM Events", "M2M RPQ CREDIT Events", "M2M Scoreboard Events", "M2M TRACKER Events", "M2M Transgress Credit Events", "M2M UCLK Events", "M2M Vertical In Use RING Events", "M2M Vertical RING Events", "M2M WPQ CREDIT Events", "M2M WPQ EVENTS", "M2M Write Tracker Events", "M2PCIe CMS Horizontal EGRESS Events", "M2PCIe CMS Transgress Credit Events", "M2PCIe CMS Transgress INGRESS Events", "M2PCIe CMS Vertical EGRESS Events", "M2PCIe EGRESS Events", "M2PCIe EGRESS P2P Credit Events", "M2PCIe External Misc Events (eg. From MS2IDI)", "M2PCIe Horizontal RING Events", "M2PCIe Horizontal In Use RING Events", "M2PCIe IIO_CREDITS Events", "M2PCIe INGRESS Events", "M2PCIe INGRESS P2P Credit Events", "M2PCIe Misc Events", "M2PCIe UCLK Events", "M2PCIe Vertical In Use RING Events", "M2PCIe Vertical RING Events", "M3UPI ARB Events", "M3UPI CMS Horizontal EGRESS Events", "M3UPI CMS Transgress Credit Events", "M3UPI CMS Transgress INGRESS Events", "M3UPI CMS Vertical EGRESS Events", "M3UPI EGRESS Credit Events", "M3UPI External Misc Events (eg. 
From MS2IDI)", "M3UPI FlowQ Events", "M3UPI Horizontal RING Events", "M3UPI Horizontal In Use RING Events", "M3UPI INGRESS Arbitration Events", "M3UPI INGRESS Credit Events", "M3UPI INGRESS Events", "M3UPI INGRESS Flit Events", "M3UPI INGRESS Sloting Events", "M3UPI Link VN Credit Events", "M3UPI Misc Events", "M3UPI Special Egress Events", "M3UPI UCLK Events", "M3UPI Vertical In Use RING Events", "M3UPI Vertical RING Events", "M3UPI Writeback Events", "M3UPI XPT Events", "PCIE3 Link Cycle Events", "PCIE3 Misc Events", "PCIE3 Utilization Events", "PCU CORE_C_STATE_TRANSITION Events", "PCU FIVR Events", "PCU FREQ_MAX_LIMIT Events", "PCU FREQ_MIN_LIMIT Events", "PCU FREQ_TRANS Events", "PCU Frequency Clipping Events", "PCU MEMORY_PHASE_SHEDDING Events", "PCU PCLK Events", "PCU PKG_C_STATE_RESIDENCY Events", "PCU POWER_STATE_OCC Events", "PCU PROCHOT Events", "PCU VR_HOT Events", "UBOX EVENT_MSG Events", "UBOX LOCK Events", "UBOX M2U Events", "UBOX PHOLD Events", "UBOX RACU Events", "UPI_LL CFCLK Events", "UPI_LL CRC_ERRORS_RX Events", "UPI_LL DIRECT2CORE Events", "UPI_LL FLIT match Events", "UPI_LL Flit Events", "UPI_LL LL to M3 Events", "UPI_LL POWER Events", "UPI_LL POWER_RX Events", "UPI_LL POWER_TX Events", "UPI_LL RXQ Events", "UPI_LL RX_CREDITS_CONSUMED Events", "UPI_LL TXQ Events", "UPI_LL VNA_CREDIT_RETURN Events", "iMC ACT Events", "iMC CAS Events", "iMC DCLK Events", "iMC DRAM_PRE_ALL Events", "iMC DRAM_REFRESH Events", "iMC Debug Events", "iMC Error Events", "iMC PMM CMD Events", "iMC PMM MEMMODE COHERENCY Events", "iMC PMM MEMMODE SCOREBOARD Events", "iMC PMM RPQ Events", "iMC PMM WPQ Events", "iMC POWER Events", "iMC PRE Events", "iMC RDB Events", "iMC RPQ Events", "iMC TAG CHECK Events", "iMC WPQ Events", );
1,953,071
Python
.py
44,101
39.898528
1,355
0.659866
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,955
hsx_uc.py
andikleen_pmu-tools/ucevent/hsx_uc.py
# HSX hsxuc_events.v1.00p hsxuc_derived.v1.00p # aliases aliases = { "QPIRxMatch1": "Q_Py_PCI_RX_PMON_BOX_MATCH1", "QPIRxMask1": "Q_Py_PCI_RX_PMON_BOX_MASK1", "IRPFilter": "IRP_PCI_PMON_BOX_FILTER", "PCUFilter": "PCU_MSR_PMON_BOX_FILTER", "QPIRxMask0": "Q_Py_PCI_RX_PMON_BOX_MASK0", "CBoFilter0": "Cn_MSR_PMON_BOX_FILTER", "HA_AddrMatch0": "HAn_PCI_PMON_BOX_ADDRMATCH0", "QPITxMatch0": "Q_Py_PCI_TX_PMON_BOX_MATCH0", "HA_AddrMatch1": "HAn_PCI_PMON_BOX_ADDRMATCH1", "QPITxMask0": "Q_Py_PCI_TX_PMON_BOX_MASK0", "QPITxMask1": "Q_Py_PCI_TX_PMON_BOX_MASK1", "QPITxMatch1": "Q_Py_PCI_TX_PMON_BOX_MATCH1", "HA_OpcodeMatch": "HAn_PCI_PMON_BOX_OPCODEMATCH", "QPIRxMatch0": "Q_Py_PCI_RX_PMON_BOX_MATCH0", "CBoFilter1": "Cn_MSR_PMON_BOX_FILTER1", "UBoxFilter": "U_MSR_PMON_BOX_FILTER", } events = { # iMC: "iMC.ACT_COUNT": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", }, "iMC.ACT_COUNT.BYP": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.ACT_COUNT.RD": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. 
Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.ACT_COUNT.WR": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.BYP_CMDS": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", }, "iMC.BYP_CMDS.PRE": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.BYP_CMDS.CAS": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.BYP_CMDS.ACT": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.CAS_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", }, "iMC.CAS_COUNT.RD_UNDERFILL": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.CAS_COUNT.RD_RMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, 
"ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.CAS_COUNT.RD_REG": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.CAS_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001100", }, "iMC.CAS_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00000011", }, "iMC.CAS_COUNT.WR_RMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.CAS_COUNT.WR_WMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.CAS_COUNT.ALL": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001111", }, "iMC.CAS_COUNT.RD_WMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.DCLOCKTICKS": { "Box": "iMC", "Category": "iMC DCLK Events", "Counters": "0-3", "Desc": "DRAM Clockticks", "EvSel": 0, "ExtSel": "", }, "iMC.DRAM_PRE_ALL": { "Box": "iMC", "Category": "iMC DRAM_PRE_ALL Events", "Counters": "0-3", "Defn": "Counts the number of times that the precharge all command was sent.", "Desc": "DRAM Precharge All Commands", "EvSel": 6, "ExtSel": "", }, 
"iMC.DRAM_REFRESH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", }, "iMC.DRAM_REFRESH.HIGH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.DRAM_REFRESH.PANIC": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.ECC_CORRECTABLE_ERRORS": { "Box": "iMC", "Category": "iMC ECC Events", "Counters": "0-3", "Defn": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit erros in lockstep mode.", "Desc": "ECC Correctable Errors", "EvSel": 9, "ExtSel": "", }, "iMC.MAJOR_MODES": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", }, "iMC.MAJOR_MODES.ISOCH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. 
Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.MAJOR_MODES.PARTIAL": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.MAJOR_MODES.WRITE": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.MAJOR_MODES.READ": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.POWER_CHANNEL_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.", "Desc": "Channel DLLOFF Cycles", "EvSel": 132, "ExtSel": "", "Notes": "IBT = Input Buffer Termination = Off", }, "iMC.POWER_CHANNEL_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. 
If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.", "Desc": "Channel PPD Cycles", "EvSel": 133, "ExtSel": "", "MaxIncCyc": 4, "Notes": "IBT = Input Buffer Termination = On. ALL Ranks must be populated in order to measure", }, "iMC.POWER_CKE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, }, "iMC.POWER_CKE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000001", }, "iMC.POWER_CKE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b01000000", }, "iMC.POWER_CKE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00100000", }, "iMC.POWER_CKE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000100", }, "iMC.POWER_CKE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00001000", }, "iMC.POWER_CKE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00010000", }, "iMC.POWER_CKE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000010", }, "iMC.POWER_CKE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b10000000", }, "iMC.POWER_CRITICAL_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.", "Desc": "Critical Throttle Cycles", "EvSel": 134, "ExtSel": "", }, "iMC.POWER_PCU_THROTTLING": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "EvSel": 66, "ExtSel": "", }, "iMC.POWER_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. 
One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.", "Desc": "Clock-Enabled Self-Refresh", "EvSel": 67, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.POWER_THROTTLE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. 
It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. 
If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.POWER_THROTTLE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PREEMPTION": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. 
This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", }, "iMC.PREEMPTION.RD_PREEMPT_WR": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PREEMPTION.RD_PREEMPT_RD": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. 
This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PRE_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", }, "iMC.PRE_COUNT.BYP": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.PRE_COUNT.PAGE_MISS": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PRE_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.PRE_COUNT.PAGE_CLOSE": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PRE_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_PRIO": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", }, "iMC.RD_CAS_PRIO.HIGH": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_PRIO.PANIC": { "Box": 
"iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_PRIO.MED": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_PRIO.LOW": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_RANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", }, "iMC.RD_CAS_RANK0.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK0.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK0.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK0.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK0.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK0.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK0.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK0.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001001", }, 
"iMC.RD_CAS_RANK0.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK0.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK0.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK0.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK0.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK0.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK0.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK0.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK0.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK0.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK0.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001100", }, 
"iMC.RD_CAS_RANK0.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK0.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", }, "iMC.RD_CAS_RANK1.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK1.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK1.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK1.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK1.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK1.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK1.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK1.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK1.BANKG1": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK1.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK1.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK1.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK1.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK1.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK1.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK1.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK1.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK1.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK1.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK1.BANK8": { "Box": "iMC", "Category": "iMC CAS 
Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK1.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", }, "iMC.RD_CAS_RANK2.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", }, "iMC.RD_CAS_RANK4.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK4.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK4.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK4.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK4.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK4.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK4.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, 
"ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK4.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK4.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK4.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK4.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK4.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK4.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK4.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK4.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK4.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK4.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK4.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": 
"b00001111", }, "iMC.RD_CAS_RANK4.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK4.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK4.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", }, "iMC.RD_CAS_RANK5.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK5.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK5.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK5.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK5.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK5.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK5.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK5.BANK9": { "Box": 
"iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK5.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK5.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK5.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK5.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK5.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK5.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK5.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK5.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK5.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK5.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK5.BANK13": { "Box": "iMC", "Category": "iMC 
CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK5.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK5.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", }, "iMC.RD_CAS_RANK6.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK6.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK6.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK6.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK6.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK6.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK6.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK6.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS 
Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK6.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK6.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK6.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK6.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK6.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK6.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK6.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK6.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK6.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK6.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK6.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", 
"EvSel": 182, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK6.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK6.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", }, "iMC.RD_CAS_RANK7.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK7.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK7.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK7.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK7.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK7.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK7.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK7.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001001", }, 
"iMC.RD_CAS_RANK7.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK7.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK7.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK7.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK7.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK7.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK7.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK7.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK7.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK7.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK7.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK7.BANK5": 
{ "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK7.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010000", }, "iMC.RPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.", "Desc": "Read Pending Queue Not Empty", "EvSel": 17, "ExtSel": "", }, "iMC.RPQ_INSERTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. 
This includes both ISOCH and non-ISOCH requests.", "Desc": "Read Pending Queue Allocations", "EvSel": 16, "ExtSel": "", }, "iMC.VMSE_MXB_WR_OCCUPANCY": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE MXB write buffer occupancy", "EvSel": 145, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, }, "iMC.VMSE_WR_PUSH": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", }, "iMC.VMSE_WR_PUSH.WMM": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.VMSE_WR_PUSH.RMM": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WMM_TO_RMM": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", }, "iMC.WMM_TO_RMM.LOW_THRESH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WMM_TO_RMM.VMSE_RETRY": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WMM_TO_RMM.STARVE": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WPQ_CYCLES_FULL": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. 
This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.", "Desc": "Write Pending Queue Full Cycles", "EvSel": 34, "ExtSel": "", }, "iMC.WPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.", "Desc": "Write Pending Queue Not Empty", "EvSel": 33, "ExtSel": "", }, "iMC.WPQ_READ_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. 
Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 35, "ExtSel": "", }, "iMC.WPQ_WRITE_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 36, "ExtSel": "", }, "iMC.WRONG_MM": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Not getting the requested Major Mode", "EvSel": 193, "ExtSel": "", }, "iMC.WR_CAS_RANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", }, "iMC.WR_CAS_RANK0.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK0.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK0.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK0.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK0.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": 
"0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK0.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK0.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK0.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK0.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK0.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK0.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK0.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK0.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK0.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK0.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK0.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS 
Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK0.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK0.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK0.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK0.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK0.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", }, "iMC.WR_CAS_RANK1.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK1.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK1.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK1.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK1.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", 
"Umask": "b00001101", }, "iMC.WR_CAS_RANK1.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK1.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK1.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK1.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK1.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK1.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK1.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK1.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK1.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK1.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK1.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000011", }, 
"iMC.WR_CAS_RANK1.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK1.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK1.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK1.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK1.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", }, "iMC.WR_CAS_RANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", }, "iMC.WR_CAS_RANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", }, "iMC.WR_CAS_RANK4.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK4.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK4.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK4.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS 
Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK4.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK4.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK4.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK4.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK4.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK4.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK4.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK4.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK4.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK4.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK4.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", 
"EvSel": 188, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK4.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK4.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK4.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK4.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK4.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK4.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", }, "iMC.WR_CAS_RANK5.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK5.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK5.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK5.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001001", 
}, "iMC.WR_CAS_RANK5.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK5.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK5.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK5.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK5.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK5.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK5.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK5.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK5.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK5.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK5.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010011", }, 
"iMC.WR_CAS_RANK5.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK5.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK5.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK5.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK5.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK5.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", }, "iMC.WR_CAS_RANK6.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK6.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK6.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK6.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK6.BANKG2": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK6.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK6.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK6.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK6.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK6.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK6.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK6.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK6.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK6.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK6.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK6.BANK15": { "Box": "iMC", "Category": "iMC CAS 
Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK6.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK6.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK6.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK6.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK6.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", }, "iMC.WR_CAS_RANK7.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK7.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK7.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK7.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK7.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access 
to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK7.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK7.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK7.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK7.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK7.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK7.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK7.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK7.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK7.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK7.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK7.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 
191, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK7.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK7.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK7.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK7.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK7.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010001", }, # UBOX: "UBOX.EVENT_MSG": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", }, "UBOX.EVENT_MSG.DOORBELL_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UBOX.PHOLD_CYCLES": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles. Filter from source CoreID.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", }, "UBOX.PHOLD_CYCLES.ASSERT_TO_ACK": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles. 
Filter from source CoreID.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.RACU_REQUESTS": { "Box": "UBOX", "Category": "UBOX RACU Events", "Counters": "0-1", "Desc": "RACU Request", "EvSel": 70, "ExtSel": "", "Notes": "This will be dropped because PHOLD is not implemented this way", }, # SBO: "SBO.BOUNCE_CONTROL": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Bounce Control", "EvSel": 10, "ExtSel": "", }, "SBO.CLOCKTICKS": { "Box": "SBO", "Category": "SBO UCLK Events", "Counters": "0-3", "Desc": "Uncore Clocks", "EvSel": 0, "ExtSel": "", }, "SBO.FAST_ASSERTED": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 9, "ExtSel": "", }, "SBO.RING_AD_USED": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "SBO.RING_AD_USED.DOWN_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "SBO.RING_AD_USED.UP_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "SBO.RING_AD_USED.DOWN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "SBO.RING_AD_USED.DOWN_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "SBO.RING_AD_USED.UP_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "SBO.RING_AD_USED.UP": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "SBO.RING_AK_USED": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "SBO.RING_AK_USED.UP": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "SBO.RING_AK_USED.UP_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "SBO.RING_AK_USED.DOWN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "SBO.RING_AK_USED.UP_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "SBO.RING_AK_USED.DOWN_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "SBO.RING_AK_USED.DOWN_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "SBO.RING_BL_USED": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "SBO.RING_BL_USED.DOWN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "SBO.RING_BL_USED.DOWN_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "SBO.RING_BL_USED.UP_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "SBO.RING_BL_USED.DOWN_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "SBO.RING_BL_USED.UP": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "SBO.RING_BL_USED.UP_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "SBO.RING_BOUNCES": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 2, }, "SBO.RING_BOUNCES.AD_CACHE": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "SBO.RING_BOUNCES.BL_CORE": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "SBO.RING_BOUNCES.IV_CORE": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "SBO.RING_BOUNCES.AK_CORE": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "SBO.RING_IV_USED": { "Box": "SBO", 
"Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in HSX. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", }, "SBO.RING_IV_USED.UP": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in HSX. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00000011", }, "SBO.RING_IV_USED.DN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in HSX. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. 
To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00001100", }, "SBO.RxR_BYPASS": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, }, "SBO.RxR_BYPASS.IV": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00100000", }, "SBO.RxR_BYPASS.AD_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000010", }, "SBO.RxR_BYPASS.BL_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000100", }, "SBO.RxR_BYPASS.AD_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000001", }, "SBO.RxR_BYPASS.AK": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00010000", }, "SBO.RxR_BYPASS.BL_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00001000", }, "SBO.RxR_INSERTS": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": 
"Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", }, "SBO.RxR_INSERTS.IV": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "SBO.RxR_INSERTS.AD_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "SBO.RxR_INSERTS.BL_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxx1xx", }, "SBO.RxR_INSERTS.AK": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "SBO.RxR_INSERTS.BL_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "SBO.RxR_INSERTS.AD_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "SBO.RxR_OCCUPANCY": { "Box": "SBO", "Category": "SBO 
INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, }, "SBO.RxR_OCCUPANCY.AK": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00010000", }, "SBO.RxR_OCCUPANCY.AD_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000001", }, "SBO.RxR_OCCUPANCY.BL_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00001000", }, "SBO.RxR_OCCUPANCY.BL_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000100", }, "SBO.RxR_OCCUPANCY.AD_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. 
The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000010", }, "SBO.RxR_OCCUPANCY.IV": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00100000", }, "SBO.TxR_ADS_USED": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", }, "SBO.TxR_ADS_USED.AD": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "SBO.TxR_ADS_USED.BL": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "SBO.TxR_ADS_USED.AK": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "SBO.TxR_INSERTS": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", }, "SBO.TxR_INSERTS.AD_BNC": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "SBO.TxR_INSERTS.IV": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxx1xxxxx", }, "SBO.TxR_INSERTS.BL_CRD": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "SBO.TxR_INSERTS.AK": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "SBO.TxR_INSERTS.AD_CRD": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "SBO.TxR_INSERTS.BL_BNC": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "SBO.TxR_OCCUPANCY": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, }, "SBO.TxR_OCCUPANCY.BL_BNC": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. 
The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00001000", }, "SBO.TxR_OCCUPANCY.AK": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00010000", }, "SBO.TxR_OCCUPANCY.AD_CRD": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000001", }, "SBO.TxR_OCCUPANCY.IV": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00100000", }, "SBO.TxR_OCCUPANCY.AD_BNC": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000010", }, "SBO.TxR_OCCUPANCY.BL_CRD": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. 
The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000100", }, # HA: "HA.ADDR_OPC_MATCH": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", }, "HA.ADDR_OPC_MATCH.AD": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.ADDR_OPC_MATCH.ADDR": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.ADDR_OPC_MATCH.AK": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.ADDR_OPC_MATCH.BL": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.ADDR_OPC_MATCH.OPC": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.ADDR_OPC_MATCH.FILT": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "b00000011", }, "HA.BT_CYCLES_NE": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.", "Desc": "BT Cycles Not Empty", "EvSel": 66, "ExtSel": "", "Notes": "Will not count case HT is empty and a Bypass happens.", }, "HA.BT_OCCUPANCY": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the HA BT pool in every cycle. 
This can be used with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "BT Occupancy", "EvSel": 67, "ExtSel": "", "MaxIncCyc": 512, }, "HA.BYPASS_IMC": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", }, "HA.BYPASS_IMC.NOT_TAKEN": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", "Umask": "bxxxxxx1x", }, "HA.BYPASS_IMC.TAKEN": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", "Umask": "bxxxxxxx1", }, "HA.CLOCKTICKS": { "Box": "HA", "Category": "HA UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the HA. 
This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.", "Desc": "uclks", "EvSel": 0, "ExtSel": "", }, "HA.CONFLICT_CYCLES": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": 1, "Defn": "Counters the number of cycles there was a conflict in the HA because threads in two different sockets were requesting the same address at the same time", "Desc": "Conflict Checks", "EvSel": 11, "Filter": "N", "ExtSel": "", }, "HA.DIRECT2CORE_COUNT": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of Direct2Core messages sent", "Desc": "Direct2Core Messages Sent", "EvSel": 17, "ExtSel": "", "Notes": "Will not be implemented since OUTBOUND_TX_BL:0x1 will count DRS to CORE which is effectively the same thing as D2C count", }, "HA.DIRECT2CORE_CYCLES_DISABLED": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of cycles in which Direct2Core was disabled", "Desc": "Cycles when Direct2Core was Disabled", "EvSel": 18, "ExtSel": "", }, "HA.DIRECT2CORE_TXN_OVERRIDE": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of Reads where Direct2Core overridden", "Desc": "Number of Reads that had Direct2Core Overridden", "EvSel": 19, "ExtSel": "", }, "HA.DIRECTORY_LAT_OPT": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Directory Latency Optimization Data Return Path Taken. 
When directory mode is enabled and the directory retuned for a read is Dir=I, then data can be returned using a faster path if certain conditions are met (credits, free pipeline, etc).", "Desc": "Directory Lat Opt Return", "EvSel": 65, "ExtSel": "", }, "HA.DIRECTORY_LOOKUP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "HA.DIRECTORY_LOOKUP.NO_SNP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "HA.DIRECTORY_LOOKUP.SNP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "HA.DIRECTORY_UPDATE": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "HA.DIRECTORY_UPDATE.ANY": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. 
These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx11", }, "HA.DIRECTORY_UPDATE.SET": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "HA.DIRECTORY_UPDATE.CLEAR": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "HA.HITME_HIT": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", }, "HA.HITME_HIT.RSPFWDI_REMOTE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.HITME_HIT.EVICTS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b01000010", }, "HA.HITME_HIT.RSPFWDS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.HITME_HIT.READ_OR_INVITOE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": 
"", "Umask": "bxxxxxxx1", }, "HA.HITME_HIT.INVALS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b00100110", }, "HA.HITME_HIT.ALL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b11111111", }, "HA.HITME_HIT.HOM": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b00001111", }, "HA.HITME_HIT.RSP": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.HITME_HIT.WBMTOI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.HITME_HIT.ALLOCS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b01110000", }, "HA.HITME_HIT.WBMTOE_OR_S": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.HITME_HIT.ACKCNFLTWBI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.HITME_HIT.RSPFWDI_LOCAL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.HITME_HIT_PV_BITS_SET": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", }, "HA.HITME_HIT_PV_BITS_SET.RSP": { "Box": "HA", "Category": "HA HitME 
Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.HITME_HIT_PV_BITS_SET.HOM": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "b00001111", }, "HA.HITME_HIT_PV_BITS_SET.ALL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "b11111111", }, "HA.HITME_HIT_PV_BITS_SET.READ_OR_INVITOE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.HITME_HIT_PV_BITS_SET.RSPFWDS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.HITME_HIT_PV_BITS_SET.ACKCNFLTWBI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.HITME_HIT_PV_BITS_SET.WBMTOE_OR_S": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.HITME_HIT_PV_BITS_SET.WBMTOI": { "Box": "HA", "Category": "HA HitME Events", 
"Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.HITME_LOOKUP": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", }, "HA.HITME_LOOKUP.ACKCNFLTWBI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.HITME_LOOKUP.RSPFWDI_LOCAL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.HITME_LOOKUP.ALLOCS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b01110000", }, "HA.HITME_LOOKUP.WBMTOI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.HITME_LOOKUP.WBMTOE_OR_S": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.HITME_LOOKUP.INVALS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b00100110", }, "HA.HITME_LOOKUP.ALL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b11111111", }, "HA.HITME_LOOKUP.RSPFWDS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.HITME_LOOKUP.READ_OR_INVITOE": { "Box": "HA", 
"Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.HITME_LOOKUP.RSP": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.HITME_LOOKUP.HOM": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b00001111", }, "HA.HITME_LOOKUP.RSPFWDI_REMOTE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.IGR_NO_CREDIT_CYCLES": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI1": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI2": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. 
This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI0": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI2": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI1": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI0": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.IMC_READS": { "Box": "HA", "Category": "HA IMC_READS Events", "Counters": "0-3", "Defn": "Count of the number of reads issued to any of the memory controller channels. 
This can be filtered by the priority of the reads.", "Desc": "HA to iMC Normal Priority Reads Issued", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 4, "Notes": "Does not count reads using the bypass path. That is counted separately in HA_IMC.BYPASS", }, "HA.IMC_READS.NORMAL": { "Box": "HA", "Category": "HA IMC_READS Events", "Counters": "0-3", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Normal Priority Reads Issued", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 4, "Notes": "Does not count reads using the bypass path. That is counted separately in HA_IMC.BYPASS", "Umask": "b00000001", }, "HA.IMC_RETRY": { "Box": "HA", "Category": "HA IMC_MISC Events", "Counters": "0-3", "Desc": "Retry Events", "EvSel": 30, "ExtSel": "", }, "HA.IMC_WRITES": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", }, "HA.IMC_WRITES.PARTIAL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.IMC_WRITES.ALL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. 
It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "b00001111", }, "HA.IMC_WRITES.FULL_ISOCH": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.IMC_WRITES.PARTIAL_ISOCH": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.IMC_WRITES.FULL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.OSB": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", }, "HA.OSB.REMOTE": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.OSB.READS_LOCAL_USEFUL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.OSB.REMOTE_USEFUL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.OSB.READS_LOCAL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.OSB.CANCELLED": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.OSB.INVITOE_LOCAL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.OSB_EDR": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", }, "HA.OSB_EDR.READS_LOCAL_I": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.OSB_EDR.READS_REMOTE_I": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.OSB_EDR.ALL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.OSB_EDR.READS_LOCAL_S": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.OSB_EDR.READS_REMOTE_S": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts 
the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.REQUESTS": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", }, "HA.REQUESTS.READS_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.REQUESTS.INVITOE_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.REQUESTS.INVITOE_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.REQUESTS.WRITES": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "b00001100", }, "HA.REQUESTS.READS": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "b00000011", }, "HA.REQUESTS.WRITES_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.REQUESTS.WRITES_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.REQUESTS.READS_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.RING_AD_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_AD_USED.CCW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_AD_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "HA.RING_AD_USED.CCW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_AD_USED.CW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_AD_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "HA.RING_AD_USED.CW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_AK_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_AK_USED.CCW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_AK_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "HA.RING_AK_USED.CCW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_AK_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "HA.RING_AK_USED.CW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_AK_USED.CW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_BL_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_BL_USED.CCW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_BL_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "HA.RING_BL_USED.CW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_BL_USED.CW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_BL_USED.CCW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_BL_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "HA.RPQ_CYCLES_NO_REG_CREDITS": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. 
This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN3": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00001000", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN1": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. 
This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000010", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN0": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000001", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN2": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. 
This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000100", }, "HA.SBO0_CREDITS_ACQUIRED": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 104, "ExtSel": "", }, "HA.SBO0_CREDITS_ACQUIRED.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 104, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO0_CREDITS_ACQUIRED.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 104, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SBO0_CREDIT_OCCUPANCY": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits in use in a given cycle, per ring.", "Desc": "SBo0 Credits Occupancy", "EvSel": 106, "ExtSel": "", }, "HA.SBO0_CREDIT_OCCUPANCY.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits in use in a given cycle, per ring.", "Desc": "SBo0 Credits Occupancy", "EvSel": 106, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SBO0_CREDIT_OCCUPANCY.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits in use in a given cycle, per ring.", "Desc": "SBo0 Credits Occupancy", "EvSel": 106, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO1_CREDITS_ACQUIRED": { 
"Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 105, "ExtSel": "", }, "HA.SBO1_CREDITS_ACQUIRED.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 105, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SBO1_CREDITS_ACQUIRED.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 105, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO1_CREDIT_OCCUPANCY": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits in use in a given cycle, per ring.", "Desc": "SBo1 Credits Occupancy", "EvSel": 107, "ExtSel": "", }, "HA.SBO1_CREDIT_OCCUPANCY.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits in use in a given cycle, per ring.", "Desc": "SBo1 Credits Occupancy", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO1_CREDIT_OCCUPANCY.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits in use in a given cycle, per ring.", "Desc": "SBo1 Credits Occupancy", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNOOPS_RSP_AFTER_DATA": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts the number of reads when the snoop was on the critical path to the data return.", "Desc": "Data beat the Snoop Responses", "EvSel": 10, "ExtSel": "", "MaxIncCyc": 127, }, "HA.SNOOPS_RSP_AFTER_DATA.LOCAL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts the number of reads when the snoop was on the critical path to the data return.", "Desc": "Data beat the Snoop Responses", "EvSel": 
10, "ExtSel": "", "MaxIncCyc": 127, "Umask": "b00000001", }, "HA.SNOOPS_RSP_AFTER_DATA.REMOTE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts the number of reads when the snoop was on the critical path to the data return.", "Desc": "Data beat the Snoop Responses", "EvSel": 10, "ExtSel": "", "MaxIncCyc": 127, "Umask": "b00000010", }, "HA.SNOOP_CYCLES_NE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", }, "HA.SNOOP_CYCLES_NE.LOCAL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNOOP_CYCLES_NE.REMOTE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNOOP_CYCLES_NE.ALL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", "Umask": "b00000011", }, "HA.SNOOP_OCCUPANCY": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjunction with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. 
HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.", "Desc": "Tracker Snoops Outstanding Accumulator", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, }, "HA.SNOOP_OCCUPANCY.LOCAL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjunction with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.", "Desc": "Tracker Snoops Outstanding Accumulator", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000001", }, "HA.SNOOP_OCCUPANCY.REMOTE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjunction with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.", "Desc": "Tracker Snoops Outstanding Accumulator", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000010", }, "HA.SNOOP_RESP": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. 
In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", }, "HA.SNOOP_RESP.RSPI": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNOOP_RESP.RSPS": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNOOP_RESP.RSPSFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.SNOOP_RESP.RSPCNFLCT": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.SNOOP_RESP.RSPIFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.SNOOP_RESP.RSP_FWD_WB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.SNOOP_RESP.RSP_WB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.SNP_RESP_RECV_LOCAL": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", }, "HA.SNP_RESP_RECV_LOCAL.RSPxFWDxWB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPIFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.SNP_RESP_RECV_LOCAL.RSPCNFLCT": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.SNP_RESP_RECV_LOCAL.OTHER": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local 
request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPSFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPxWB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPS": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNP_RESP_RECV_LOCAL.RSPI": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.STALL_NO_SBO_CREDIT": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", }, "HA.STALL_NO_SBO_CREDIT.SBO0_BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.STALL_NO_SBO_CREDIT.SBO1_BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. 
Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.STALL_NO_SBO_CREDIT.SBO1_AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.STALL_NO_SBO_CREDIT.SBO0_AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TAD_REQUESTS_G0": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, }, "HA.TAD_REQUESTS_G0.REGION3": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. 
This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "HA.TAD_REQUESTS_G0.REGION1": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "HA.TAD_REQUESTS_G0.REGION6": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b01000000", }, "HA.TAD_REQUESTS_G0.REGION0": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "HA.TAD_REQUESTS_G0.REGION4": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "HA.TAD_REQUESTS_G0.REGION7": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b10000000", }, "HA.TAD_REQUESTS_G0.REGION2": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "HA.TAD_REQUESTS_G0.REGION5": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00100000", }, "HA.TAD_REQUESTS_G1": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, }, "HA.TAD_REQUESTS_G1.REGION8": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. 
There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "HA.TAD_REQUESTS_G1.REGION9": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "HA.TAD_REQUESTS_G1.REGION11": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. 
This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "HA.TAD_REQUESTS_G1.REGION10": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "HA.TRACKER_CYCLES_FULL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. 
HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Full", "EvSel": 2, "ExtSel": "", }, "HA.TRACKER_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Full", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TRACKER_CYCLES_FULL.GP": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. 
HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Full", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TRACKER_CYCLES_NE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", }, "HA.TRACKER_CYCLES_NE.ALL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. 
HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", "Umask": "b00000011", }, "HA.TRACKER_CYCLES_NE.REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TRACKER_CYCLES_NE.LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. 
HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TRACKER_OCCUPANCY": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "HA.TRACKER_OCCUPANCY.READS_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxx1xxx", }, "HA.TRACKER_OCCUPANCY.WRITES_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. 
This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxx1xxxx", }, "HA.TRACKER_OCCUPANCY.WRITES_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxx1xxxxx", }, "HA.TRACKER_OCCUPANCY.READS_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. 
HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxx1xx", }, "HA.TRACKER_OCCUPANCY.INVITOE_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b1xxxxxxx", }, "HA.TRACKER_OCCUPANCY.INVITOE_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. 
HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bx1xxxxxx", }, "HA.TRACKER_PENDING_OCCUPANCY": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.", "Desc": "Data Pending Occupancy Accumultor", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, }, "HA.TRACKER_PENDING_OCCUPANCY.LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.", "Desc": "Data Pending Occupancy Accumultor", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000001", }, "HA.TRACKER_PENDING_OCCUPANCY.REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. 
(2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.", "Desc": "Data Pending Occupancy Accumultor", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000010", }, "HA.TxR_AD_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", }, "HA.TxR_AD_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_AD_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_AD_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_AK": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Desc": "Outbound Ring Transactions on AK", "EvSel": 14, "ExtSel": "", }, "HA.TxR_AK_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", }, "HA.TxR_AK_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_AK_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_AK_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_BL": { "Box": "HA", 
"Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", }, "HA.TxR_BL.DRS_QPI": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.TxR_BL.DRS_CORE": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_BL.DRS_CACHE": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. 
This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_BL_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", }, "HA.TxR_BL_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_BL_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_BL_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_BL_OCCUPANCY": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Occupancy", "Desc": "BL Egress Occupancy", "EvSel": 52, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "HA.TxR_STARVED": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.", "Desc": "Injection Starvation", "EvSel": 109, "ExtSel": "", }, "HA.TxR_STARVED.AK": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.", "Desc": "Injection Starvation", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_STARVED.BL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "Counts injection starvation. 
This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.", "Desc": "Injection Starvation", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.WPQ_CYCLES_NO_REG_CREDITS": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN2": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000100", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN0": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000001", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN1": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000010", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN3": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00001000", }, # CBO: "CBO.BOUNCE_CONTROL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Bounce Control", "EvSel": 10, "ExtSel": "", }, "CBO.CLOCKTICKS": { "Box": "CBO", "Category": "CBO UCLK Events", "Counters": "0-3", "Desc": "Uncore Clocks", "EvSel": 0, "ExtSel": "", }, "CBO.COUNTER0_OCCUPANCY": { "Box": "CBO", "Category": "CBO OCCUPANCY Events", "Counters": "0-3", "Defn": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. 
setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.", "Desc": "Counter 0 Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.FAST_ASSERTED": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles either the local distress or incoming distress signals are asserted. Incoming distress includes both up and dn.", "Desc": "FaST wire asserted", "EvSel": 9, "ExtSel": "", }, "CBO.LLC_LOOKUP": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", }, "CBO.LLC_LOOKUP.REMOTE_SNOOP": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. 
One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00001001", }, "CBO.LLC_LOOKUP.READ": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00100001", }, "CBO.LLC_LOOKUP.DATA_READ": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. 
This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00000011", }, "CBO.LLC_LOOKUP.WRITE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. 
To monitor any lookup, set the field to 0x1F.", "Umask": "b00000101", }, "CBO.LLC_LOOKUP.ANY": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00010001", }, "CBO.LLC_LOOKUP.NID": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. 
The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b01000001", }, "CBO.LLC_VICTIMS": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", }, "CBO.LLC_VICTIMS.E_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.LLC_VICTIMS.M_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.LLC_VICTIMS.NID": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.LLC_VICTIMS.MISS": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.LLC_VICTIMS.I_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.LLC_VICTIMS.F_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.MISC": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", }, "CBO.MISC.STARTED": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.MISC.RFO_HIT_S": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.MISC.CVZERO_PREFETCH_VICTIM": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.MISC.CVZERO_PREFETCH_MISS": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.MISC.RSPI_WAS_FSE": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.MISC.WC_ALIASING": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RING_AD_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD 
ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "CBO.RING_AD_USED.UP": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_AD_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "CBO.RING_AD_USED.DOWN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. 
On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_AD_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "CBO.RING_AD_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "CBO.RING_AD_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "CBO.RING_AD_USED.ALL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_AK_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "CBO.RING_AK_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "CBO.RING_AK_USED.UP": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_AK_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "CBO.RING_AK_USED.ALL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_AK_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "CBO.RING_AK_USED.DOWN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_AK_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "CBO.RING_BL_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "CBO.RING_BL_USED.DOWN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_BL_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "CBO.RING_BL_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "CBO.RING_BL_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "CBO.RING_BL_USED.ALL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_BL_USED.UP": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_BL_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "CBO.RING_BOUNCES": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", }, "CBO.RING_BOUNCES.AK": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RING_BOUNCES.BL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RING_BOUNCES.IV": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RING_BOUNCES.AD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RING_IV_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in HSX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", }, "CBO.RING_IV_USED.DN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in HSX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_IV_USED.ANY": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in HSX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_IV_USED.UP": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in HSX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_IV_USED.DOWN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in HSX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b11001100", }, "CBO.RING_SRC_THRTL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic.", "EvSel": 7, "ExtSel": "", }, "CBO.RxR_EXT_STARVED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. 
This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", }, "CBO.RxR_EXT_STARVED.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_EXT_STARVED.PRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_EXT_STARVED.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_EXT_STARVED.ISMQ_BIDS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. 
This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_INSERTS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", }, "CBO.RxR_INSERTS.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxx1xx", }, "CBO.RxR_INSERTS.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxxx1", }, "CBO.RxR_INSERTS.PRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxx1xxxx", }, "CBO.RxR_INSERTS.IRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxx1x", }, "CBO.RxR_INSERTS.PRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", 
"EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxx1xxxxx", }, "CBO.RxR_IPQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", }, "CBO.RxR_IPQ_RETRY.ADDR_CONFLICT": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_IPQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IPQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_IPQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_IPQ_RETRY2": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. 
Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 40, "ExtSel": "", }, "CBO.RxR_IPQ_RETRY2.TARGET": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 40, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_IPQ_RETRY2.AD_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", }, "CBO.RxR_IRQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_IRQ_RETRY.IIO_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.RxR_IRQ_RETRY.NID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_IRQ_RETRY.ADDR_CONFLICT": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_IRQ_RETRY.RTID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_IRQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", 
"Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_IRQ_RETRY2": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", }, "CBO.RxR_IRQ_RETRY2.TARGET": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_IRQ_RETRY2.AD_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY2.BL_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_ISMQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", }, "CBO.RxR_ISMQ_RETRY.NID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_ISMQ_RETRY.WB_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CBO.RxR_ISMQ_RETRY.IIO_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.RxR_ISMQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_ISMQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_ISMQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_ISMQ_RETRY.RTID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_ISMQ_RETRY2": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", }, "CBO.RxR_ISMQ_RETRY2.BL_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_ISMQ_RETRY2.AD_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_ISMQ_RETRY2.TARGET": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_OCCUPANCY": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress 
Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, }, "CBO.RxR_OCCUPANCY.IRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000010", }, "CBO.RxR_OCCUPANCY.PRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00100000", }, "CBO.RxR_OCCUPANCY.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000001", }, "CBO.RxR_OCCUPANCY.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000100", }, "CBO.SBO_CREDITS_ACQUIRED": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo credits acquired in a given cycle, per ring. 
Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Acquired", "EvSel": 61, "ExtSel": "", }, "CBO.SBO_CREDITS_ACQUIRED.AD": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Acquired", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.SBO_CREDITS_ACQUIRED.BL": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Acquired", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.SBO_CREDIT_OCCUPANCY": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": 0, "Defn": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Occupancy", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 7, "Notes": "Each Cbo has 3 AD and 2 BL credits into its assigned Sbo.", "SubCtr": 1, }, "CBO.SBO_CREDIT_OCCUPANCY.AD": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": 0, "Defn": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Occupancy", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 7, "Notes": "Each Cbo has 3 AD and 2 BL credits into its assigned Sbo.", "SubCtr": 1, "Umask": "bxxxxxxx1", }, "CBO.SBO_CREDIT_OCCUPANCY.BL": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": 0, "Defn": "Number of Sbo credits in use in a given cycle, per ring. 
Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Occupancy", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 7, "Notes": "Each Cbo has 3 AD and 2 BL credits into its assigned Sbo.", "SubCtr": 1, "Umask": "bxxxxxx1x", }, "CBO.TOR_INSERTS": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", }, "CBO.TOR_INSERTS.REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10000001", }, "CBO.TOR_INSERTS.OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000001", }, "CBO.TOR_INSERTS.NID_MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01001010", }, "CBO.TOR_INSERTS.NID_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000001", }, "CBO.TOR_INSERTS.MISS_LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00101010", }, "CBO.TOR_INSERTS.NID_WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01010000", }, "CBO.TOR_INSERTS.ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00001000", }, "CBO.TOR_INSERTS.EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000100", }, "CBO.TOR_INSERTS.NID_MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000011", }, "CBO.TOR_INSERTS.MISS_LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00100011", }, "CBO.TOR_INSERTS.LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00100001", }, "CBO.TOR_INSERTS.NID_EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000100", }, "CBO.TOR_INSERTS.WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00010000", }, "CBO.TOR_INSERTS.MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000011", }, "CBO.TOR_INSERTS.NID_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01001000", }, "CBO.TOR_INSERTS.MISS_REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10000011", }, "CBO.TOR_INSERTS.REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10001000", }, "CBO.TOR_INSERTS.LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00101000", }, "CBO.TOR_INSERTS.MISS_REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10001010", }, "CBO.TOR_OCCUPANCY": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.TOR_OCCUPANCY.NID_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000001", }, "CBO.TOR_OCCUPANCY.REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10000001", }, "CBO.TOR_OCCUPANCY.OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000001", }, "CBO.TOR_OCCUPANCY.NID_MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01001010", }, "CBO.TOR_OCCUPANCY.ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00001000", }, "CBO.TOR_OCCUPANCY.NID_MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000011", }, "CBO.TOR_OCCUPANCY.EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000100", }, "CBO.TOR_OCCUPANCY.MISS_LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00101010", }, "CBO.TOR_OCCUPANCY.NID_WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01010000", }, "CBO.TOR_OCCUPANCY.MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000011", }, "CBO.TOR_OCCUPANCY.WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00010000", }, "CBO.TOR_OCCUPANCY.MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00001010", }, "CBO.TOR_OCCUPANCY.NID_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01001000", }, "CBO.TOR_OCCUPANCY.LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00100001", }, "CBO.TOR_OCCUPANCY.MISS_LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00100011", }, "CBO.TOR_OCCUPANCY.NID_EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000100", }, "CBO.TOR_OCCUPANCY.MISS_REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10001010", }, "CBO.TOR_OCCUPANCY.MISS_REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10000011", }, "CBO.TOR_OCCUPANCY.REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10001000", }, "CBO.TOR_OCCUPANCY.LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00101000", }, "CBO.TxR_ADS_USED": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", }, "CBO.TxR_ADS_USED.BL": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.TxR_ADS_USED.AK": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.TxR_ADS_USED.AD": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.TxR_INSERTS": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", }, "CBO.TxR_INSERTS.IV_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.TxR_INSERTS.BL_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.TxR_INSERTS.BL_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.TxR_INSERTS.AD_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.TxR_INSERTS.AD_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.TxR_INSERTS.AK_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.TxR_INSERTS.AK_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxx1xxxxx", }, # R3QPI: "R3QPI.CLOCKTICKS": { "Box": "R3QPI", "Category": "R3QPI UCLK Events", "Counters": "0-2", "Defn": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Number of uclks in domain", "EvSel": 1, "ExtSel": "", }, "R3QPI.C_HI_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO11": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO10": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO_15_17": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": 
"b1xxxxxxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO12": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO14_16": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO8": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO9": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO13": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO0": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, 
"R3QPI.C_LO_AD_CREDITS_EMPTY.CBO4": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO6": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO1": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO5": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO3": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO7": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "b1xxxxxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO2": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxx1xx", }, 
"R3QPI.HA_R2_BL_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 45, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.HA0": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 45, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxxxxx1", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.HA1": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 45, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxxxx1x", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.R2_NCS": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 45, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxx1xxx", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.R2_NCB": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 45, 
"ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxxx1xx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN0_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN0_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN0_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, 
"ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI0_BL_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 46, "ExtSel": "", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to 
QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN0_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN0_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", 
"Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN0_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.RING_AD_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R3QPI.RING_AD_USED.CW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R3QPI.RING_AD_USED.CW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R3QPI.RING_AD_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R3QPI.RING_AD_USED.CCW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R3QPI.RING_AD_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R3QPI.RING_AD_USED.CCW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R3QPI.RING_AK_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R3QPI.RING_AK_USED.CCW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R3QPI.RING_AK_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R3QPI.RING_AK_USED.CW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R3QPI.RING_AK_USED.CW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R3QPI.RING_AK_USED.CCW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R3QPI.RING_AK_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R3QPI.RING_BL_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R3QPI.RING_BL_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R3QPI.RING_BL_USED.CCW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R3QPI.RING_BL_USED.CCW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R3QPI.RING_BL_USED.CW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R3QPI.RING_BL_USED.CW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R3QPI.RING_BL_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R3QPI.RING_IV_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", }, "R3QPI.RING_IV_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00000011", }, "R3QPI.RING_IV_USED.ANY": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00001111", }, "R3QPI.RING_IV_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b11001100", }, "R3QPI.RING_SINK_STARVED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Number of cycles the ringstop is in starvation (per ring)", "Desc": "Ring Stop Starved", "EvSel": 14, "ExtSel": "", "MaxIncCyc": 2, }, "R3QPI.RING_SINK_STARVED.AK": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Number of cycles the ringstop is in starvation (per ring)", "Desc": "Ring Stop Starved", "EvSel": 14, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "R3QPI.RxR_CYCLES_NE": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", }, "R3QPI.RxR_CYCLES_NE.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RxR_CYCLES_NE.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.RxR_CYCLES_NE.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.RxR_CYCLES_NE_VN1": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", }, "R3QPI.RxR_CYCLES_NE_VN1.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.RxR_CYCLES_NE_VN1.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.RxR_CYCLES_NE_VN1.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RxR_CYCLES_NE_VN1.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.RxR_CYCLES_NE_VN1.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.RxR_CYCLES_NE_VN1.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.RxR_INSERTS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", }, "R3QPI.RxR_INSERTS.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. 
This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.RxR_INSERTS.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.RxR_INSERTS.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RxR_INSERTS.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.RxR_INSERTS.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.RxR_INSERTS.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.RxR_INSERTS_VN1": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", }, "R3QPI.RxR_INSERTS_VN1.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.RxR_INSERTS_VN1.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.RxR_INSERTS_VN1.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.RxR_INSERTS_VN1.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RxR_INSERTS_VN1.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.RxR_INSERTS_VN1.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.RxR_OCCUPANCY_VN1": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, }, "R3QPI.RxR_OCCUPANCY_VN1.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. 
This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxxx1xxxx", }, "R3QPI.RxR_OCCUPANCY_VN1.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxxxx1xxx", }, "R3QPI.RxR_OCCUPANCY_VN1.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxxxxxx1x", }, "R3QPI.RxR_OCCUPANCY_VN1.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxxxxxxx1", }, "R3QPI.RxR_OCCUPANCY_VN1.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxx1xxxxx", }, "R3QPI.RxR_OCCUPANCY_VN1.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxxxxx1xx", }, "R3QPI.SBO0_CREDITS_ACQUIRED": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, }, "R3QPI.SBO0_CREDITS_ACQUIRED.AD": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "R3QPI.SBO0_CREDITS_ACQUIRED.BL": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "R3QPI.SBO1_CREDITS_ACQUIRED": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 41, "ExtSel": "", "MaxIncCyc": 2, }, "R3QPI.SBO1_CREDITS_ACQUIRED.BL": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 41, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "R3QPI.SBO1_CREDITS_ACQUIRED.AD": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 41, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, 
"R3QPI.STALL_NO_SBO_CREDIT": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, }, "R3QPI.STALL_NO_SBO_CREDIT.SBO0_BL": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000100", }, "R3QPI.STALL_NO_SBO_CREDIT.SBO0_AD": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000001", }, "R3QPI.STALL_NO_SBO_CREDIT.SBO1_AD": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000010", }, "R3QPI.STALL_NO_SBO_CREDIT.SBO1_BL": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. 
Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00001000", }, "R3QPI.TxR_NACK": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", }, "R3QPI.TxR_NACK.UP_BL": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.TxR_NACK.DN_AK": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.TxR_NACK.DN_AD": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.TxR_NACK.UP_AK": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.TxR_NACK.DN_BL": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.TxR_NACK.UP_AD": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN0_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. 
This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", }, "R3QPI.VN0_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN0_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN0_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN0_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN0_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN0_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN0_CREDITS_USED": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", }, "R3QPI.VN0_CREDITS_USED.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN0_CREDITS_USED.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN0_CREDITS_USED.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN0_CREDITS_USED.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN0_CREDITS_USED.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN0_CREDITS_USED.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN1_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", }, "R3QPI.VN1_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN1_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN1_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN1_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN1_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN1_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN1_CREDITS_USED": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", }, "R3QPI.VN1_CREDITS_USED.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN1_CREDITS_USED.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN1_CREDITS_USED.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN1_CREDITS_USED.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN1_CREDITS_USED.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN1_CREDITS_USED.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VNA_CREDITS_ACQUIRED": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. 
One can count the number of packets transfered in a given message class using an qfclk event.", "Desc": "VNA credit Acquisitions", "EvSel": 51, "ExtSel": "", "MaxIncCyc": 4, }, "R3QPI.VNA_CREDITS_ACQUIRED.BL": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transfered in a given message class using an qfclk event.", "Desc": "VNA credit Acquisitions", "EvSel": 51, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "R3QPI.VNA_CREDITS_ACQUIRED.AD": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. 
One can count the number of packets transfered in a given message class using an qfclk event.", "Desc": "VNA credit Acquisitions", "EvSel": 51, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "R3QPI.VNA_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", }, "R3QPI.VNA_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VNA_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VNA_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VNA_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VNA_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VNA_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxx1xxxxx", }, # QPI_LL: "QPI_LL.CLOCKTICKS": { "Box": "QPI_LL", "Category": "QPI_LL CFCLK Events", "Counters": "0-3", "Defn": "Counts the number of clocks in the QPI LL. This clock runs at 1/4th the \"GT/s\" speed of the QPI link. For example, a 4GT/s link will have qfclk or 1GHz. HSX does not support dynamic link speeds, so this frequency is fixed.", "Desc": "Number of qfclks", "EvSel": 20, "ExtSel": "", }, "QPI_LL.CTO_COUNT": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Counters": "0-3", "Defn": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. 
You can use edge detect to count the number of cases when both events triggered.", "Desc": "Count of CTO Events", "EvSel": 56, "Filter": "QPIMask0[17:0],QPIMatch0[17:0],QPIMask1[19:16],QPIMatch1[19:16]", "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, }, "QPI_LL.DIRECT2CORE": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_RBT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "QPI_LL.DIRECT2CORE.FAILURE_RBT_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bx1xxxxxx", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_RBT_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "b1xxxxxxx", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.DIRECT2CORE.FAILURE_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "QPI_LL.DIRECT2CORE.SUCCESS_RBT_HIT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.DIRECT2CORE.FAILURE_RBT_HIT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxx1xx", }, "QPI_LL.L1_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.", "Desc": "Cycles in L1", "EvSel": 18, "ExtSel": "", }, "QPI_LL.RxL0P_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p", "EvSel": 16, "ExtSel": "", "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "QPI_LL.RxL0_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. 
Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0", "EvSel": 15, "ExtSel": "", "Notes": "Includes L0p cycles. To get just L0, subtract RxL0P_POWER_CYCLES", }, "QPI_LL.RxL_BYPASSED": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "Rx Flit Buffer Bypassed", "EvSel": 9, "ExtSel": "", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.DRS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NCB": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.SNP": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.HOM": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NCS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NDR": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxx1xxxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.NCS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.NDR": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxx1xxxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.NCB": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VNA": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VNA Credit Consumed", "EvSel": 29, "ExtSel": "", }, "QPI_LL.RxL_CYCLES_NE": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.", "Desc": "RxQ Cycles Not Empty", "EvSel": 10, "ExtSel": "", }, "QPI_LL.RxL_FLITS_G1": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_FLITS_G1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000110", }, "QPI_LL.RxL_FLITS_G1.DRS_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.RxL_FLITS_G1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00011000", }, "QPI_LL.RxL_FLITS_G1.HOM_NONREQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.RxL_FLITS_G1.HOM_REQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.RxL_FLITS_G1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. 
Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.RxL_FLITS_G1.DRS_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.RxL_FLITS_G2": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. 
This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_FLITS_G2.NCB_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.RxL_FLITS_G2.NCB_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.RxL_FLITS_G2.NCS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.RxL_FLITS_G2.NDR_AK": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.RxL_FLITS_G2.NDR_AD": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.RxL_FLITS_G2.NCB": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001100", }, "QPI_LL.RxL_INSERTS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "Rx Flit Buffer Allocations", "EvSel": 8, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_DRS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.", "Desc": "Rx Flit Buffer Allocations - DRS", "EvSel": 9, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_DRS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.", "Desc": "Rx Flit Buffer Allocations - DRS", "EvSel": 9, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_DRS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.", "Desc": "Rx Flit Buffer Allocations - DRS", "EvSel": 9, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_HOM": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.", "Desc": "Rx Flit Buffer Allocations - HOM", "EvSel": 12, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_HOM.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.", "Desc": "Rx Flit Buffer Allocations - HOM", "EvSel": 12, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_HOM.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.", "Desc": "Rx Flit Buffer Allocations - HOM", "EvSel": 12, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NCB": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.", "Desc": "Rx Flit Buffer Allocations - NCB", "EvSel": 10, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_NCB.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.", "Desc": "Rx Flit Buffer Allocations - NCB", "EvSel": 10, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_NCB.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.", "Desc": "Rx Flit Buffer Allocations - NCB", "EvSel": 10, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NCS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.", "Desc": "Rx Flit Buffer Allocations - NCS", "EvSel": 11, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_NCS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.", "Desc": "Rx Flit Buffer Allocations - NCS", "EvSel": 11, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_NCS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.", "Desc": "Rx Flit Buffer Allocations - NCS", "EvSel": 11, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NDR": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.", "Desc": "Rx Flit Buffer Allocations - NDR", "EvSel": 14, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_NDR.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.", "Desc": "Rx Flit Buffer Allocations - NDR", "EvSel": 14, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NDR.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.", "Desc": "Rx Flit Buffer Allocations - NDR", "EvSel": 14, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_SNP": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.", "Desc": "Rx Flit Buffer Allocations - SNP", "EvSel": 13, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_SNP.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.", "Desc": "Rx Flit Buffer Allocations - SNP", "EvSel": 13, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_SNP.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.", "Desc": "Rx Flit Buffer Allocations - SNP", "EvSel": 13, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 11, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_DRS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors DRS flits only.", "Desc": "RxQ Occupancy - DRS", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_DRS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.", "Desc": "RxQ Occupancy - DRS", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_DRS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.", "Desc": "RxQ Occupancy - DRS", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_HOM": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.", "Desc": "RxQ Occupancy - HOM", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_HOM.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.", "Desc": "RxQ Occupancy - HOM", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_HOM.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors HOM flits only.", "Desc": "RxQ Occupancy - HOM", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_NCB": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.", "Desc": "RxQ Occupancy - NCB", "EvSel": 22, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_NCB.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.", "Desc": "RxQ Occupancy - NCB", "EvSel": 22, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_NCB.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.", "Desc": "RxQ Occupancy - NCB", "EvSel": 22, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_NCS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.", "Desc": "RxQ Occupancy - NCS", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_NCS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors NCS flits only.", "Desc": "RxQ Occupancy - NCS", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_NCS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.", "Desc": "RxQ Occupancy - NCS", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_NDR": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.", "Desc": "RxQ Occupancy - NDR", "EvSel": 26, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_NDR.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.", "Desc": "RxQ Occupancy - NDR", "EvSel": 26, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_NDR.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.", "Desc": "RxQ Occupancy - NDR", "EvSel": 26, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_SNP": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors SNP flits only.", "Desc": "RxQ Occupancy - SNP", "EvSel": 25, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_SNP.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.", "Desc": "RxQ Occupancy - SNP", "EvSel": 25, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_SNP.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.", "Desc": "RxQ Occupancy - SNP", "EvSel": 25, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.TxL0P_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. 
This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p", "EvSel": 13, "ExtSel": "", "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "QPI_LL.TxL0_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0", "EvSel": 12, "ExtSel": "", "Notes": "Includes L0p cycles. To get just L0, subtract TxL0P_POWER_CYCLES", }, "QPI_LL.TxL_BYPASSED": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.", "Desc": "Tx Flit Buffer Bypassed", "EvSel": 5, "ExtSel": "", }, "QPI_LL.TxL_CYCLES_NE": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. 
However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.", "Desc": "Tx Flit Buffer Cycles not Empty", "EvSel": 6, "ExtSel": "", }, "QPI_LL.TxL_FLITS_G0": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.TxL_FLITS_G0.DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). 
In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.TxL_FLITS_G0.NON_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.TxL_FLITS_G1": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.TxL_FLITS_G1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000110", }, "QPI_LL.TxL_FLITS_G1.DRS_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.TxL_FLITS_G1.HOM_NONREQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.TxL_FLITS_G1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00011000", }, "QPI_LL.TxL_FLITS_G1.HOM_REQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.TxL_FLITS_G1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.TxL_FLITS_G1.DRS_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.TxL_FLITS_G2": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.TxL_FLITS_G2.NDR_AD": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.TxL_FLITS_G2.NDR_AK": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.TxL_FLITS_G2.NCB": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001100", }, "QPI_LL.TxL_FLITS_G2.NCB_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.TxL_FLITS_G2.NCB_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.TxL_FLITS_G2.NCS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.TxL_INSERTS": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. 
This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "Tx Flit Buffer Allocations", "EvSel": 4, "ExtSel": "", }, "QPI_LL.TxL_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.", "Desc": "Tx Flit Buffer Occupancy", "EvSel": 7, "ExtSel": "", }, "QPI_LL.TxR_AD_HOM_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - HOM", "EvSel": 38, "ExtSel": "", }, "QPI_LL.TxR_AD_HOM_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - HOM", "EvSel": 38, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_AD_HOM_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. 
Flow Control FIFO for Home messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - HOM", "EvSel": 38, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_AD_HOM_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD HOM", "EvSel": 34, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, }, "QPI_LL.TxR_AD_HOM_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD HOM", "EvSel": 34, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_AD_HOM_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD HOM", "EvSel": 34, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_AD_NDR_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. 
Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 40, "ExtSel": "", }, "QPI_LL.TxR_AD_NDR_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 40, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_AD_NDR_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 40, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_AD_NDR_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 36, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, }, "QPI_LL.TxR_AD_NDR_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. 
Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 36, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_AD_NDR_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 36, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_AD_SNP_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - SNP", "EvSel": 39, "ExtSel": "", }, "QPI_LL.TxR_AD_SNP_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - SNP", "EvSel": 39, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_AD_SNP_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - SNP", "EvSel": 39, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_AD_SNP_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. 
Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD SNP", "EvSel": 35, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, }, "QPI_LL.TxR_AD_SNP_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD SNP", "EvSel": 35, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_AD_SNP_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD SNP", "EvSel": 35, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_AK_NDR_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.", "Desc": "R3QPI Egress Credit Occupancy - AK NDR", "EvSel": 41, "ExtSel": "", }, "QPI_LL.TxR_AK_NDR_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. 
Local NDR message class to AK Egress.", "Desc": "R3QPI Egress Credit Occupancy - AK NDR", "EvSel": 37, "ExtSel": "", "MaxIncCyc": 6, "SubCtr": 1, }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", "Umask": "b00000100", }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. 
DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000100", }, "QPI_LL.TxR_BL_NCB_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCB", "EvSel": 43, "ExtSel": "", }, "QPI_LL.TxR_BL_NCB_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. 
NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCB", "EvSel": 43, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCB_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCB", "EvSel": 43, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCB_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCB", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, }, "QPI_LL.TxR_BL_NCB_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCB", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCB_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCB", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCS_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. 
NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCS", "EvSel": 44, "ExtSel": "", }, "QPI_LL.TxR_BL_NCS_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCS", "EvSel": 44, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCS_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCS", "EvSel": 44, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCS_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCS", "EvSel": 33, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, }, "QPI_LL.TxR_BL_NCS_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCS", "EvSel": 33, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCS_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. 
NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCS", "EvSel": 33, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.VNA_CREDIT_RETURNS": { "Box": "QPI_LL", "Category": "QPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "Defn": "Number of VNA credits returned.", "Desc": "VNA Credits Returned", "EvSel": 28, "ExtSel": "", }, "QPI_LL.VNA_CREDIT_RETURN_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "Defn": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.", "Desc": "VNA Credits Pending Return - Occupancy", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, # PCU: "PCU.CLOCKTICKS": { "Box": "PCU", "Category": "PCU PCLK Events", "Counters": "0-3", "Defn": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.", "Desc": "pclk Cycles", "EvSel": 0, "ExtSel": "", }, "PCU.CORE0_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 96, "ExtSel": "", "Notes": "This only tracks the hardware portion in the RCFSM (CFCFSM). This portion is just doing the core C state transition. It does not include any necessary frequency/voltage transitions.", }, "PCU.CORE10_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 106, "ExtSel": "", }, "PCU.CORE11_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 107, "ExtSel": "", }, "PCU.CORE12_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 108, "ExtSel": "", }, "PCU.CORE13_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 109, "ExtSel": "", }, "PCU.CORE14_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 110, "ExtSel": "", }, "PCU.CORE15_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 111, "ExtSel": "", }, "PCU.CORE16_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 112, "ExtSel": "", }, "PCU.CORE17_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 113, "ExtSel": "", }, "PCU.CORE1_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 97, "ExtSel": "", }, "PCU.CORE2_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 98, "ExtSel": "", }, "PCU.CORE3_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 99, "ExtSel": "", }, "PCU.CORE4_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 100, "ExtSel": "", }, "PCU.CORE5_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 101, "ExtSel": "", }, "PCU.CORE6_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 102, "ExtSel": "", }, "PCU.CORE7_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 103, "ExtSel": "", }, "PCU.CORE8_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 104, "ExtSel": "", "Notes": "This only tracks the hardware portion in the RCFSM (CFCFSM). This portion is just doing the core C state transition. It does not include any necessary frequency/voltage transitions.", }, "PCU.CORE9_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 105, "ExtSel": "", }, "PCU.DEMOTIONS_CORE0": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 48, "ExtSel": "", }, "PCU.DEMOTIONS_CORE1": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 49, "ExtSel": "", }, "PCU.DEMOTIONS_CORE10": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 58, "ExtSel": "", }, "PCU.DEMOTIONS_CORE11": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 59, "ExtSel": "", }, "PCU.DEMOTIONS_CORE12": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 60, "ExtSel": "", }, "PCU.DEMOTIONS_CORE13": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 61, "ExtSel": "", }, "PCU.DEMOTIONS_CORE14": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 62, "ExtSel": "", }, "PCU.DEMOTIONS_CORE15": { "Box": "PCU", "Category": "PCU 
CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 63, "ExtSel": "", }, "PCU.DEMOTIONS_CORE16": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 64, "ExtSel": "", }, "PCU.DEMOTIONS_CORE17": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 65, "ExtSel": "", }, "PCU.DEMOTIONS_CORE2": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 50, "ExtSel": "", }, "PCU.DEMOTIONS_CORE3": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 51, "ExtSel": "", }, "PCU.DEMOTIONS_CORE4": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 52, "ExtSel": "", }, "PCU.DEMOTIONS_CORE5": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 53, "ExtSel": "", }, "PCU.DEMOTIONS_CORE6": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 
54, "ExtSel": "", }, "PCU.DEMOTIONS_CORE7": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 55, "ExtSel": "", }, "PCU.DEMOTIONS_CORE8": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 56, "ExtSel": "", }, "PCU.DEMOTIONS_CORE9": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 57, "ExtSel": "", }, "PCU.FREQ_BAND0_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 11, "Filter": "PCUFilter[7:0]", "ExtSel": "", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. 
It is handled on fast path.", }, "PCU.FREQ_BAND1_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 12, "Filter": "PCUFilter[15:8]", "ExtSel": "", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", }, "PCU.FREQ_BAND2_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. 
One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 13, "Filter": "PCUFilter[23:16]", "ExtSel": "", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", }, "PCU.FREQ_BAND3_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 14, "Filter": "PCUFilter[31:24]", "ExtSel": "", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. 
It is handled on fast path.", }, "PCU.FREQ_MAX_LIMIT_THERMAL_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.", "Desc": "Thermal Strongest Upper Limit Cycles", "EvSel": 4, "ExtSel": "", }, "PCU.FREQ_MAX_OS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the OS is the upper limit on frequency.", "Desc": "OS Strongest Upper Limit Cycles", "EvSel": 6, "ExtSel": "", "Notes": "Essentially, this event says the OS is getting the frequency it requested.", }, "PCU.FREQ_MAX_POWER_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when power is the upper limit on frequency.", "Desc": "Power Strongest Upper Limit Cycles", "EvSel": 5, "ExtSel": "", }, "PCU.FREQ_MIN_IO_P_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MIN_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.", "Desc": "IO P Limit Strongest Lower Limit Cycles", "EvSel": 115, "ExtSel": "", }, "PCU.FREQ_TRANS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is changing frequency. 
This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.", "Desc": "Cycles spent changing Frequency", "EvSel": 116, "ExtSel": "", }, "PCU.MEMORY_PHASE_SHEDDING_CYCLES": { "Box": "PCU", "Category": "PCU MEMORY_PHASE_SHEDDING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.", "Desc": "Memory Phase Shedding Cycles", "EvSel": 47, "ExtSel": "", "Notes": "Package C1", }, "PCU.PKG_RESIDENCY_C0_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C0", "EvSel": 42, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C1E_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C1E. This event can be used in conjunction with edge detect to count C1E entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C1E", "EvSel": 78, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C2E_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). 
Residency events do not include transition times.", "Desc": "Package C State Residency - C2E", "EvSel": 43, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C3_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C3", "EvSel": 44, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C6_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C6", "EvSel": 45, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C7_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C7. This event can be used in conjunction with edge detect to count C7 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C7 State Residency", "EvSel": 46, "ExtSel": "", }, "PCU.POWER_STATE_OCCUPANCY": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, }, "PCU.POWER_STATE_OCCUPANCY.CORES_C6": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b11000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C3": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b10000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C0": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b01000000", }, "PCU.PROCHOT_EXTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.", "Desc": "External Prochot", "EvSel": 10, "ExtSel": "", }, "PCU.PROCHOT_INTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in Interal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.", "Desc": "Internal Prochot", "EvSel": 9, "ExtSel": "", }, "PCU.TOTAL_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions across all cores.", "Desc": "Total Core C State Transition Cycles", "EvSel": 114, "ExtSel": "", }, "PCU.VR_HOT_CYCLES": { "Box": "PCU", "Category": "PCU VR_HOT Events", "Counters": "0-3", "Desc": "VR Hot", "EvSel": 66, "ExtSel": "", }, # R2PCIe: "R2PCIe.CLOCKTICKS": { "Box": "R2PCIe", "Category": "R2PCIe UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. 
However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Number of uclks in domain", "EvSel": 1, "ExtSel": "", }, "R2PCIe.IIO_CREDIT": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, }, "R2PCIe.IIO_CREDIT.PRQ_QPI1": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxx1x", }, "R2PCIe.IIO_CREDIT.ISOCH_QPI0": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "R2PCIe.IIO_CREDIT.ISOCH_QPI1": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxx1xxx", }, "R2PCIe.IIO_CREDIT.PRQ_QPI0": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AD_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_AD_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_AD_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_AD_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_AD_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AD_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AD_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RING_AK_BOUNCES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", }, "R2PCIe.RING_AK_BOUNCES.DN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AK_BOUNCES.UP": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AK_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_AK_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_AK_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_AK_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AK_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RING_AK_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AK_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_BL_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_BL_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_BL_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_BL_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RING_BL_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_BL_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_BL_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_IV_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", }, "R2PCIe.RING_IV_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_IV_USED.ANY": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00001111", }, "R2PCIe.RING_IV_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RxR_CYCLES_NE": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", }, "R2PCIe.RxR_CYCLES_NE.NCB": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.RxR_CYCLES_NE.NCS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.RxR_INSERTS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", }, "R2PCIe.RxR_INSERTS.NCS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.RxR_INSERTS.NCB": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. 
This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.RxR_OCCUPANCY": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "R2PCIe.RxR_OCCUPANCY.DRS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, "Umask": "b00001000", }, "R2PCIe.SBO0_CREDITS_ACQUIRED": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, }, "R2PCIe.SBO0_CREDITS_ACQUIRED.BL": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "R2PCIe.SBO0_CREDITS_ACQUIRED.AD": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "R2PCIe.STALL_NO_SBO_CREDIT": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO0_BL": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO1_BL": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. 
Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxx1xxx", }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO1_AD": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxx1x", }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO0_AD": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_CYCLES_FULL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", }, "R2PCIe.TxR_CYCLES_FULL.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_CYCLES_FULL.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_CYCLES_FULL.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_CYCLES_NE": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. 
This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", }, "R2PCIe.TxR_CYCLES_NE.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_CYCLES_NE.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_CYCLES_NE.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_NACK_CW": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", }, "R2PCIe.TxR_NACK_CW.UP_BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.TxR_NACK_CW.DN_AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_NACK_CW.DN_AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_NACK_CW.UP_AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.TxR_NACK_CW.DN_BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_NACK_CW.UP_AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxx1xxx", }, # IRP: "IRP.CACHE_TOTAL_OCCUPANCY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "IRP.CACHE_TOTAL_OCCUPANCY.ANY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. 
This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000001", }, "IRP.CACHE_TOTAL_OCCUPANCY.SOURCE": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000010", }, "IRP.CLOCKTICKS": { "Box": "IRP", "Category": "IRP IO_CLKS Events", "Counters": "0-1", "Defn": "Number of clocks in the IRP.", "Desc": "Clocks in the IRP", "EvSel": 0, "ExtSel": "", }, "IRP.COHERENT_OPS": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", }, "IRP.COHERENT_OPS.PCIDCAHINT": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IRP.COHERENT_OPS.DRD": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IRP.COHERENT_OPS.CRD": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IRP.COHERENT_OPS.PCIRDCUR": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 
19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IRP.COHERENT_OPS.WBMTOI": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bx1xxxxxx", }, "IRP.COHERENT_OPS.CLFLUSH": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "b1xxxxxxx", }, "IRP.COHERENT_OPS.RFO": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IRP.COHERENT_OPS.PCITOM": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IRP.MISC0": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", }, "IRP.MISC0.2ND_WR_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx00x1x00", }, "IRP.MISC0.2ND_ATOMIC_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx001xx00", }, "IRP.MISC0.2ND_RD_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx00xx100", }, "IRP.MISC0.FAST_REQ": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "b000000x1", }, "IRP.MISC0.PF_TIMEOUT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": 
"Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "b1xx00000", }, "IRP.MISC0.FAST_REJ": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "b0000001x", }, "IRP.MISC0.PF_ACK_HINT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx1x00000", }, "IRP.MISC0.FAST_XFER": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bxx100000", }, "IRP.MISC1": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", }, "IRP.MISC1.SLOW_E": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000xx1xx", }, "IRP.MISC1.DATA_THROTTLE": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b1xxx0000", }, "IRP.MISC1.SLOW_I": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000xxxx1", }, "IRP.MISC1.SEC_RCVD_INVLD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "bxx1x0000", }, "IRP.MISC1.SEC_RCVD_VLD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "bx1xx0000", }, "IRP.MISC1.SLOW_S": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000xxx1x", }, "IRP.MISC1.LOST_FWD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b0001xxxx", }, "IRP.MISC1.SLOW_M": { "Box": "IRP", "Category": "IRP MISC 
Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000x1xxx", }, "IRP.RxR_AK_INSERTS": { "Box": "IRP", "Category": "IRP AK_INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).", "Desc": "AK Ingress Occupancy", "EvSel": 10, "ExtSel": "", }, "IRP.RxR_BL_DRS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 4, "ExtSel": "", }, "IRP.RxR_BL_DRS_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - DRS", "EvSel": 1, "ExtSel": "", }, "IRP.RxR_BL_DRS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 7, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.RxR_BL_NCB_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). 
It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 5, "ExtSel": "", }, "IRP.RxR_BL_NCB_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - NCB", "EvSel": 2, "ExtSel": "", }, "IRP.RxR_BL_NCB_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 8, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.RxR_BL_NCS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 6, "ExtSel": "", }, "IRP.RxR_BL_NCS_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - NCS", "EvSel": 3, "ExtSel": "", }, "IRP.RxR_BL_NCS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). 
It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.SNOOP_RESP": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", }, "IRP.SNOOP_RESP.SNPDATA": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxx1xxxxx", }, "IRP.SNOOP_RESP.HIT_I": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxxxx1x", }, "IRP.SNOOP_RESP.HIT_ES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. 
Unsure which combinations are possible.", "Umask": "bxxxxx1xx", }, "IRP.SNOOP_RESP.HIT_M": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxx1xxx", }, "IRP.SNOOP_RESP.MISS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxxxxx1", }, "IRP.SNOOP_RESP.SNPCODE": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxx1xxxx", }, "IRP.SNOOP_RESP.SNPINV": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bx1xxxxxx", }, "IRP.TRANSACTIONS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. 
This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", }, "IRP.TRANSACTIONS.READS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxxxx1", }, "IRP.TRANSACTIONS.OTHER": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxx1xxxxx", }, "IRP.TRANSACTIONS.ORDERINGQ": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. 
If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bx1xxxxxx", }, "IRP.TRANSACTIONS.WRITES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxxx1x", }, "IRP.TRANSACTIONS.ATOMIC": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxx1xxxx", }, "IRP.TRANSACTIONS.WR_PREF": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. 
If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxx1xxx", }, "IRP.TRANSACTIONS.RD_PREF": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxx1xx", }, "IRP.TxR_AD_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.", "Desc": "No AD Egress Credit Stalls", "EvSel": 24, "ExtSel": "", }, "IRP.TxR_BL_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.", "Desc": "No BL Egress Credit Stalls", "EvSel": 25, "ExtSel": "", }, "IRP.TxR_DATA_INSERTS_NCB": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 14, "ExtSel": "", }, "IRP.TxR_DATA_INSERTS_NCS": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read 
Requests", "EvSel": 15, "ExtSel": "", }, "IRP.TxR_REQUEST_OCCUPANCY": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.", "Desc": "Outbound Request Queue Occupancy", "EvSel": 13, "ExtSel": "", "SubCtr": 1, }, } derived = { # QPI_LL: "QPI_LL.DATA_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "Data received from QPI in bytes ( = DRS + NCB Data messages received from QPI)", "Desc": "Data From QPI", "Equation": "DRS_DATA_MSGS_FROM_QPI + NCB_DATA_MSGS_FROM_QPI", }, "QPI_LL.DATA_FROM_QPI_TO_HA_OR_IIO": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Defn": "Data received from QPI forwarded to HA or IIO. Expressed in Bytes", "Desc": "Data From QPI To HA or IIO", "Equation": "DATA_FROM_QPI - DATA_FROM_QPI_TO_LLC", }, "QPI_LL.DATA_FROM_QPI_TO_LLC": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Defn": "Data received from QPI forwarded to LLC. Expressed in Bytes", "Desc": "Data From QPI To LLC", "Equation": "DIRECT2CORE.SUCCESS_RBT_HIT * 64", }, "QPI_LL.DATA_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "Data packets received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "Data From QPI To Node x", "Equation": "DRS_DataC_FROM_QPI_TO_NODEx + DRS_WRITE_FROM_QPI_TO_NODEx + NCB_DATA_FROM_QPI_TO_NODEx", }, "QPI_LL.DRS_DATA_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Defn": "DRS Data Messges From QPI in bytes", "Desc": "DRS Data Messges From QPI", "Equation": "(RxL_FLITS_G1.DRS_DATA * 8)", }, "QPI_LL.DRS_DataC_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS DataC packets received from QPI sent to Node ID 'x'. 
Expressed in bytes", "Desc": "DRS DataC From QPI To Node x", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1C00,x}, Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FF80}) * 64", "Filter": "QPIRxMask0[17:0],QPIRxMatch0[17:0];QPITxMask0[17:0],QPITxMatch0[17:0]", }, "QPI_LL.DRS_DataC_M_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS DataC_F packets received from QPI. Expressed in bytes", "Desc": "DRS DataC_Fs From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) * 64", "Filter": "QPIMask0[17:0],QPIMatch0[17:0],QPIMask1[19:16],QPIMatch1[19:16]", }, "QPI_LL.DRS_FULL_CACHELINE_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Full Cacheline Data Messges From QPI in bytes", "Desc": "DRS Full Cacheline Data Messges From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00,Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1F00}) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0];QPITxMask0[12:0],QPITxMatch0[12:0]", }, "QPI_LL.DRS_F_OR_E_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS response in F or E states received from QPI in bytes. 
To calculate the total data response for each cache line state, it's necessary to add the contribution from three flavors {DataC, DataC_FrcAckCnflt, DataC_Cmp} of data response packets for each cache line state.", "Desc": "DRS Data in F or E From QPI", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C40, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C40, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C20, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C20, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF })) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0],QPIRxMask1[19:16],QPIRxMatch1[19:16];QPITxMask0[12:0],QPITxMatch0[12:0],QPITxMask1[19:16],QPITxMatch1[19:16]", }, "QPI_LL.DRS_M_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS response in M state received from QPI in bytes", "Desc": "DRS Data in M From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x8, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0],QPIRxMask1[19:16],QPIRxMatch1[19:16];QPITxMask0[12:0],QPITxMatch0[12:0],QPITxMask1[19:16],QPITxMatch1[19:16]", }, 
"QPI_LL.DRS_PTL_CACHELINE_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Partial Cacheline Data Messges From QPI in bytes", "Desc": "DRS Partial Cacheline Data Messges From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1D00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1F00}) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0];QPITxMask0[12:0],QPITxMatch0[12:0]", }, "QPI_LL.DRS_WB_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback packets received from QPI in bytes. This is the sum of Wb{I,S,E} DRS packets", "Desc": "DRS Writeback From QPI", "Equation": "DRS_WbI_FROM_QPI + DRS_WbS_FROM_QPI + DRS_WbE_FROM_QPI", }, "QPI_LL.DRS_WRITE_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Data packets (Any - DataC) received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "DRS Data From QPI To Node x", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1C00,x}, Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FE00}) - (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1C00,x}, Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FF80})) * 64", "Filter": "QPIRxMask0[17:0],QPIRxMatch0[17:0];QPITxMask0[17:0],QPITxMatch0[17:0]", }, "QPI_LL.DRS_WbE_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to E state' packets received from QPI in bytes", "Desc": "DRS WbE From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1CC0, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0];QPITxMask0[12:0],QPITxMatch0[12:0]", }, "QPI_LL.DRS_WbI_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to I state' packets received from QPI in bytes", "Desc": "DRS WbI From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C80, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0}) * 64", "Filter": 
"QPIRxMask0[12:0],QPIRxMatch0[12:0];QPITxMask0[12:0],QPITxMatch0[12:0]", }, "QPI_LL.DRS_WbS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to S state' packets received from QPI in bytes", "Desc": "DRS WbSFrom QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1CA0, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0];QPITxMask0[12:0],QPITxMatch0[12:0]", }, "QPI_LL.NCB_DATA_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "NCB Data packets (Any - Interrupts) received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "NCB Data From QPI To Node x", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1800,x}, Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FE00}) - (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1900,x}, Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FF80})) * 64", "Filter": "QPIRxMask0[17:0],QPIRxMatch0[17:0];QPITxMask0[17:0],QPITxMatch0[17:0]", }, "QPI_LL.NCB_DATA_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Defn": "NCB Data Messages From QPI in bytes", "Desc": "NCB Data Messages From QPI", "Equation": "(RxL_FLITS_G2.NCB_DATA * 8)", }, "QPI_LL.PCT_LINK_FULL_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Defn": "Percent of Cycles the QPI link is at Full Power", "Desc": "Percent Link Full Power Cycles", "Equation": "RxL0_POWER_CYCLES / CLOCKTICKS", }, "QPI_LL.PCT_LINK_HALF_DISABLED_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Defn": "Percent of Cycles the QPI link in power mode where half of the lanes are disabled.", "Desc": "Percent Link Half Disabled Cycles", "Equation": "RxL0P_POWER_CYCLES / CLOCKTICKS", }, "QPI_LL.PCT_LINK_SHUTDOWN_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER Events", "Defn": "Percent of Cycles the QPI link is Shutdown", "Desc": "Percent Link Shutdown Cycles", "Equation": "L1_POWER_CYCLES / CLOCKTICKS", 
}, "QPI_LL.QPI_DATA_BW": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Defn": "QPI data transmit bandwidth in Bytes", "Desc": "QPI Data Bandwidth", "Equation": "TxL_FLITS_G0.DATA * 8", }, "QPI_LL.QPI_LINK_BW": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Defn": "QPI total transmit bandwidth in Bytes (includes control)", "Desc": "QPI Link Bandwidth", "Equation": "(TxL_FLITS_G0.DATA + TxL_FLITS_G0.NON_DATA) * 8", }, #"QPI_LL.QPI_LINK_UTIL": { # "Box": "QPI_LL", # "Category": "QPI_LL FLITS_RX Events", # "Defn": "Percentage of cycles that QPI Link was utilized. Calculated from 1 - Number of idle flits - time the link was 'off'", # "Desc": "QPI Link Utilization", # "Equation": "(RxL_FLITS_G0.DATA + RxL_FLITS_G0.NON_DATA) / (2 * CLOCKTICKS)", #}, # PCU: #"PCU.PCT_CYC_FREQ_CURRENT_LTD": { # "Box": "PCU", # "Category": "PCU FREQ_MAX_LIMIT Events", # "Defn": "Percentage of Cycles the Max Frequency is limited by current", # "Desc": "Percent Frequency Current Limited", # "Equation": "FREQ_MAX_CURRENT_CYCLES / CLOCKTICKS", #}, "PCU.PCT_CYC_FREQ_OS_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by the OS", "Desc": "Percent Frequency OS Limited", "Equation": "FREQ_MAX_OS_CYCLES / CLOCKTICKS", }, "PCU.PCT_CYC_FREQ_POWER_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by power", "Desc": "Percent Frequency Power Limited", "Equation": "FREQ_MAX_POWER_CYCLES / CLOCKTICKS", }, #"PCU.PCT_CYC_FREQ_THERMAL_LTD": { # "Box": "PCU", # "Category": "PCU FREQ_MAX_LIMIT Events", # "Defn": "Percentage of Cycles the Max Frequency is limited by thermal issues", # "Desc": "Percent Frequency Thermal Limited", # "Equation": "FREQ_MAX_THERMAL_CYCLES / CLOCKTICKS", #}, # R2PCIe: "R2PCIe.CYC_USED_DN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used 
Down and Even", "Equation": "RING_BL_USED.CCW / SAMPLE_INTERVAL", }, "R2PCIe.CYC_USED_UP": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.CW / SAMPLE_INTERVAL", }, "R2PCIe.RING_THRU_DN_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", "Desc": "Ring Throughput Down and Even", "Equation": "RING_BL_USED.CCW* 32", }, "R2PCIe.RING_THRU_UP_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": "Ring Throughput Up and Even", "Equation": "RING_BL_USED.CW * 32", }, # CBO: "CBO.AVG_INGRESS_DEPTH": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Depth of the Ingress Queue through the sample interval", "Desc": "Average Ingress Depth", "Equation": "RxR_OCCUPANCY.IRQ / SAMPLE_INTERVAL", }, "CBO.AVG_INGRESS_LATENCY": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks", "Desc": "Average Ingress Latency", "Equation": "RxR_OCCUPANCY.IRQ / RxR_INSERTS.IRQ", }, "CBO.AVG_INGRESS_LATENCY_WHEN_NE": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks when Ingress Queue has at least one entry", "Desc": "Average Latency in Non-Empty Ingress", "Equation": "RxR_OCCUPANCY.IRQ / COUNTER0_OCCUPANCY{edge_det=1,thresh=0x1}", }, "CBO.AVG_TOR_DRDS_MISS_WHEN_NE": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Number of Data Read Entries that Miss the LLC when the TOR is not empty.", "Desc": "Average Data Read Misses in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / COUNTER0_OCCUPANCY{edge_det=1,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRDS_WHEN_NE": { 
"Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Number of Data Read Entries when the TOR is not empty.", "Desc": "Average Data Reads in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.OPCODE / COUNTER0_OCCUPANCY{edge_det=1,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_HIT_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that hit the LLC", "Desc": "Data Read Hit Latency through TOR", "Equation": "((TOR_OCCUPANCY.OPCODE - TOR_OCCUPANCY.MISS_OPCODE) / (TOR_INSERTS.OPCODE - TOR_INSERTS.MISS_OPCODE)) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Read Entries making their way through the TOR", "Desc": "Data Read Latency through TOR", "Equation": "(TOR_OCCUPANCY.OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_LOC_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC and were satsified by Local Memory", "Desc": "Data Read Local Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.{opc,nid}={0x182,my_node}", "Filter": "CBoFilter1[28:20], CBoFilter1[15:0]", }, "CBO.AVG_TOR_DRD_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC", "Desc": "Data Read Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_REM_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC and were satsified by a Remote cache or Remote 
Memory", "Desc": "Data Read Remote Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER.{opc,nid}={0x182,other_nodes}", "Filter": "CBoFilter1[28:20], CBoFilter1[15:0]", }, "CBO.CYC_INGRESS_BLOCKED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Cycles the Ingress Request Queue arbiter was Blocked", "Desc": "Cycles Ingress Blocked", "Equation": "RxR_EXT_STARVED.IRQ / SAMPLE_INTERVAL", }, "CBO.CYC_USED_DN": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used Down and Even", "Equation": "RING_BL_USED.CCW / SAMPLE_INTERVAL", }, "CBO.CYC_USED_UP": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.CW / SAMPLE_INTERVAL", }, "CBO.FAST_STR_LLC_MISS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of ItoM (fast string) operations that miss the LLC", "Desc": "Fast String misses", # XXX correct? "Equation": "TOR_INSERTS.MISS_OPCODE with:{Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8, Cn_MSR_PMON_BOX_FILTER0.tid=0x3E}", "Filter": "CBoFilter1[28:20]", }, "CBO.FAST_STR_LLC_REQ": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of ItoM (fast string) operations that reference the LLC", "Desc": "Fast String operations", # XXX guessing at the minus "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8, Cn_MSR_PMON_BOX_FILTER0.tid=0x3E} - TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8}", "Filter": "CBoFilter1[28:20]", }, "CBO.INGRESS_REJ_V_INS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Ratio of Ingress Request Entries that were rejected vs. inserted", "Desc": "Ingress Rejects vs. 
Inserts", "Equation": "RxR_INSERTS.IRQ_REJ / RxR_INSERTS.IRQ", }, "CBO.IO_READ_BW": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "IO Read Bandwidth in MB - Disk or Network Reads", "Desc": "IO Read Bandwidth", "Equation": "(TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E, Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8} + TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER.opc=0x1E6} ) * 64 / 1000000", "Filter": "CBoFilter0[5:0], CBoFilter1[28:20]", }, "CBO.IO_WRITE_BW": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "IO Write Bandwidth in MB - Disk or Network Writes", "Desc": "IO Write Bandwidth", "Equation": "(TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x19E + TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x1E4) * 64 / 1000000", "Filter": "CBoFilter1[28:20]", }, "CBO.LLC_DRD_MISS_PCT": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "LLC Data Read miss ratio", "Desc": "LLC DRD Miss Ratio", "Equation": "LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER0.state=0x1 / LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER0.state=0x3F", "Filter": "CBoFilter0[23:17]", }, "CBO.LLC_RFO_MISS_PCT": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC RFO Miss Ratio", "Desc": "LLC RFO Miss Ratio", "Equation": "(TOR_INSERTS.MISS_OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x180 - (TOR_INSERTS.MISS_OPCODE / TOR_INSERTS.OPCODE) with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E, Cn_MSR_PMON_BOX_FILTER1.opc=0x180}", "Filter": "CBoFilter1[28:20]", }, "CBO.MEM_WB_BYTES": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "Data written back to memory in Number of Bytes", "Desc": "Memory Writebacks", "Equation": "LLC_VICTIMS.M_STATE * 64", }, "CBO.MMIO_PARTIAL_READS_CPU": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of Partial MMIO Reads initiated by a Core", "Desc": "MMIO Partial Reads - CPU", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.nc=1, Cn_MSR_PMON_BOX_FILTER1.opc=0x187}", "Filter": "CBoFilter1[28:20], 
CBoFilter1[30]", }, "CBO.MMIO_WRITES_CPU": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of MMIO Writes initiated by a Core", "Desc": "MMIO Writes - CPU", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.nc=1, Cn_MSR_PMON_BOX_FILTER1.opc=0x18F}", "Filter": "CBoFilter1[28:20], CBoFilter1[30]", }, "CBO.PARTIAL_PCI_WRITES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of partial PCI writes", "Desc": "Partial PCI Writes", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E,Cn_MSR_PMON_BOX_FILTER1.opc=0x180}", "Filter": "CBoFilter0[5:0], CBoFilter1[28:20]", }, "CBO.PCI_READS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of PCI reads (full and partial)", "Desc": "PCI Reads", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x19E", "Filter": "CBoFilter1[28:20]", }, "CBO.PCI_WRITES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of PCI writes", "Desc": "PCI Writes", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E,Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8}", "Filter": "CBoFilter0[5:0], CBoFilter1[28:20]", }, #"CBO.RING_THRU_DN_BYTES": { # "Box": "CBO", # "Category": "CBO RING Events", # "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", # "Desc": "Ring Throughput Down and Even", # "Equation": "RING_BL_USED.CCW* 32", #}, #"CBO.RING_THRU_UP_BYTES": { # "Box": "CBO", # "Category": "CBO RING Events", # "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", # "Desc": "Ring Throughput Up and Even", # "Equation": "RING_BL_USED.CW * 32", #}, "CBO.STREAMED_FULL_STORES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of Streamed Store (of Full Cache Line) Transactions", "Desc": "Streaming Stores (Full Line)", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x18C", "Filter": "CBoFilter1[28:20]", }, "CBO.STREAMED_PART_STORES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number 
of Streamed Store (of Partial Cache Line) Transactions", "Desc": "Streaming Stores (Partial Line)", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x18D", "Filter": "CBoFilter1[28:20]", }, "CBO.UC_READS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Uncachable Read Transactions", "Desc": "Uncacheable Reads", "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x187", "Filter": "CBoFilter1[28:20]", }, # R3QPI: # HA: #"HA.HITME_INSERTS": { # "Box": "HA", # "Category": "HA HitME Events", # "Equation": "HITME_LOOKUP.ALLOCS - HITME_HITS.ALLOCS", #}, "HA.HITME_INVAL": { "Box": "HA", "Category": "HA HitME Events", "Equation": "HITME_HIT.INVALS", }, "HA.PCT_CYCLES_BL_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Defn": "Percentage of time the BL Egress Queue is full", "Desc": "Percent BL Egress Full", "Equation": "TxR_BL_CYCLES_FULL.ALL / SAMPLE_INTERVAL", }, "HA.PCT_CYCLES_D2C_DISABLED": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Defn": "Percentage of time that Direct2Core was disabled.", "Desc": "Percent D2C Disabled", "Equation": "DIRECT2CORE_CYCLES_DISABLED / SAMPLE_INTERVAL", }, "HA.PCT_RD_REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Defn": "Percentage of HA traffic that is from Read Requests", "Desc": "Percent Read Requests", "Equation": "REQUESTS.READS / (REQUESTS.READS + REQUESTS.WRITES)", }, "HA.PCT_WR_REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Defn": "Percentage of HA traffic that is from Write Requests", "Desc": "Percent Write Requests", "Equation": "REQUESTS.WRITES / (REQUESTS.READS + REQUESTS.WRITES)", }, # SBO: #"SBO.CYC_USED_DNEVEN": { # "Box": "SBO", # "Category": "SBO RING Events", # "Defn": "Cycles Used in the Down direction, Even polarity", # "Desc": "Cycles Used Down and Even", # "Equation": "RING_BL_USED.DN_EVEN / TOTAL_CORE_CYCLES", #}, #"SBO.CYC_USED_DNODD": { # "Box": "SBO", # "Category": "SBO RING Events", # "Defn": "Cycles Used in the Down direction, 
Odd polarity", # "Desc": "Cycles Used Down and Odd", # "Equation": "RING_BL_USED.DN_ODD / TOTAL_CORE_CYCLES", #}, "SBO.CYC_USED_UPEVEN": { "Box": "SBO", "Category": "SBO RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.UP_EVEN / TOTAL_CORE_CYCLES", }, "SBO.CYC_USED_UPODD": { "Box": "SBO", "Category": "SBO RING Events", "Defn": "Cycles Used in the Up direction, Odd polarity", "Desc": "Cycles Used Up and Odd", "Equation": "RING_BL_USED.UP_ODD / TOTAL_CORE_CYCLES", }, #"SBO.RING_THRU_DNEVEN_BYTES": { # "Box": "SBO", # "Category": "SBO RING Events", # "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", # "Desc": "Ring Throughput Down and Even", # "Equation": "RING_BL_USED.DN_EVEN * 32", #}, #"SBO.RING_THRU_DNODD_BYTES": { # "Box": "SBO", # "Category": "SBO RING Events", # "Defn": "Ring throughput in the Down direction, Odd polarity in Bytes", # "Desc": "Ring Throughput Down and Odd", # "Equation": "RING_BL_USED.DN_ODD * 32", #}, "SBO.RING_THRU_UPEVEN_BYTES": { "Box": "SBO", "Category": "SBO RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": "Ring Throughput Up and Even", "Equation": "RING_BL_USED.UP_EVEN * 32", }, "SBO.RING_THRU_UPODD_BYTES": { "Box": "SBO", "Category": "SBO RING Events", "Defn": "Ring throughput in the Up direction, Odd polarity in Bytes", "Desc": "Ring Throughput Up and Odd", "Equation": "RING_BL_USED.UP_ODD * 32", }, # iMC: "iMC.MEM_BW_READS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by reads. Expressed in bytes.", "Desc": "Read Memory Bandwidth", "Equation": "(CAS_COUNT.RD * 64)", }, "iMC.MEM_BW_TOTAL": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Total memory bandwidth. 
Expressed in bytes.", "Desc": "Total Memory Bandwidth", "Equation": "MEM_BW_READS + MEM_BW_WRITES", }, "iMC.MEM_BW_WRITES": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by writes Expressed in bytes.", "Desc": "Write Memory Bandwidth", "Equation": "(CAS_COUNT.WR * 64)", }, "iMC.PCT_CYCLES_CRITICAL_THROTTLE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in critical thermal throttling", "Desc": "Percent Cycles Critical Throttle", "Equation": "POWER_CRITICAL_THROTTLE_CYCLES / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in CKE slow (DLOFF) mode", "Desc": "Percent Cycles DLOFF", "Equation": "POWER_CHANNEL_DLLOFF / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_CKE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in CKE ON mode.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_CKE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_THR": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in thermal throttling.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_THROTTLE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in PPD mode", "Desc": "Percent Cycles PPD", "Equation": "POWER_CHANNEL_PPD / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles Memory is in self refresh power mode", "Desc": "Percent Cycles Self Refresh", "Equation": "POWER_SELF_REFRESH / MC_Chy_PCI_PMON_CTR_FIXED", }, # "iMC.PCT_RD_REQUESTS": { # "Box": "iMC", # "Category": "iMC RPQ Events", # "Defn": "Percentage of read requests from 
total requests.", # "Desc": "Percent Read Requests", # "Equation": "RPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)", # }, "iMC.PCT_REQUESTS_PAGE_EMPTY": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Empty", "Desc": "Percent Requests Page Empty", "Equation": "(ACT_COUNT - PRE_COUNT.PAGE_MISS)/ (CAS_COUNT.RD + CAS_COUNT.WR)", }, "iMC.PCT_REQUESTS_PAGE_HIT": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Hits", "Desc": "Percent Requests Page Hit", "Equation": "1 - (PCT_REQUESTS_PAGE_EMPTY + PCT_REQUESTS_PAGE_MISS)", }, "iMC.PCT_REQUESTS_PAGE_MISS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Misses", "Desc": "Percent Requests Page Miss", "Equation": "PRE_COUNT.PAGE_MISS / (CAS_COUNT.RD + CAS_COUNT.WR)", }, #"iMC.PCT_WR_REQUESTS": { # "Box": "iMC", # "Category": "iMC WPQ Events", # "Defn": "Percentage of write requests from total requests.", # "Desc": "Percent Write Requests", # "Equation": "WPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)", #}, } categories = ( "CBO CACHE Events", "CBO EGRESS Events", "CBO INGRESS Events", "CBO INGRESS_RETRY Events", "CBO MISC Events", "CBO OCCUPANCY Events", "CBO RING Events", "CBO SBO Credit Events", "CBO TOR Events", "CBO UCLK Events", "HA ADDR_OPCODE_MATCH Events", "HA BL_EGRESS Events", "HA BT (Backup Tracker) Events", "HA BYPASS Events", "HA CONFLICTS Events", "HA DIRECT2CORE Events", "HA DIRECTORY Events", "HA EGRESS Events", "HA HitME Events", "HA IMC_MISC Events", "HA IMC_READS Events", "HA IMC_WRITES Events", "HA OSB (Opportunistic Snoop Broadcast) Events", "HA OUTBOUND_TX Events", "HA QPI_IGR_CREDITS Events", "HA REQUESTS Events", "HA RING Events", "HA RPQ_CREDITS Events", "HA SBO Credit Events", "HA SNOOPS Events", "HA SNP_RESP Events", "HA TAD Events", "HA TRACKER Events", "HA UCLK Events", "HA WPQ_CREDITS Events", "IRP AK_INGRESS Events", "IRP 
BL_INGRESS_DRS Events", "IRP BL_INGRESS_NCB Events", "IRP BL_INGRESS_NCS Events", "IRP Coherency Events", "IRP IO_CLKS Events", "IRP MISC Events", "IRP OUTBOUND_REQUESTS Events", "IRP STALL_CYCLES Events", "IRP TRANSACTIONS Events", "IRP WRITE_CACHE Events", "PCU CORE_C_STATE_TRANSITION Events", "PCU FREQ_MAX_LIMIT Events", "PCU FREQ_MIN_LIMIT Events", "PCU FREQ_RESIDENCY Events", "PCU FREQ_TRANS Events", "PCU MEMORY_PHASE_SHEDDING Events", "PCU PCLK Events", "PCU PKG_C_STATE_RESIDENCY Events", "PCU POWER_STATE_OCC Events", "PCU PROCHOT Events", "PCU VR_HOT Events", "QPI_LL CFCLK Events", "QPI_LL CTO Events", "QPI_LL DIRECT2CORE Events", "QPI_LL FLITS_RX Events", "QPI_LL FLITS_TX Events", "QPI_LL POWER Events", "QPI_LL POWER_RX Events", "QPI_LL POWER_TX Events", "QPI_LL R3QPI_EGRESS_CREDITS Events", "QPI_LL RXQ Events", "QPI_LL RX_CREDITS_CONSUMED Events", "QPI_LL TXQ Events", "QPI_LL VNA_CREDIT_RETURN Events", "R2PCIe EGRESS Events", "R2PCIe IIO Credit Events", "R2PCIe INGRESS Events", "R2PCIe RING Events", "R2PCIe SBO Credit Events", "R2PCIe UCLK Events", "R3QPI EGRESS Credit Events", "R3QPI EGRESS Events", "R3QPI INGRESS Events", "R3QPI LINK_VN0_CREDITS Events", "R3QPI LINK_VN1_CREDITS Events", "R3QPI LINK_VNA_CREDITS Events", "R3QPI RING Events", "R3QPI SBO Credit Events", "R3QPI UCLK Events", "SBO EGRESS Events", "SBO INGRESS Events", "SBO RING Events", "SBO UCLK Events", "UBOX EVENT_MSG Events", "UBOX PHOLD Events", "UBOX RACU Events", "iMC ACT Events", "iMC BYPASS Command Events", "iMC CAS Events", "iMC DCLK Events", "iMC DRAM_PRE_ALL Events", "iMC DRAM_REFRESH Events", "iMC ECC Events", "iMC MAJOR_MODES Events", "iMC POWER Events", "iMC PRE Events", "iMC PREEMPTION Events", "iMC RPQ Events", "iMC VMSE Events", "iMC WPQ Events", );
722,780
Python
.py
13,911
42.053339
1,159
0.584945
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,956
bdx_uc.py
andikleen_pmu-tools/ucevent/bdx_uc.py
# BDX bdx_uc_events.v0.60p bdx_uc_derived.v0.60p # aliases aliases = { "IRPFilter": "IRP_PCI_PMON_BOX_FILTER", "UBoxFilter": "U_MSR_PMON_BOX_FILTER", "PCUFilter": "PCU_MSR_PMON_BOX_FILTER", "HA_AddrMatch0": "HAn_PCI_PMON_BOX_ADDRMATCH0", "QPITxMatch1": "Q_Py_PCI_TX_PMON_BOX_MATCH1", "CBoFilter1": "Cn_MSR_PMON_BOX_FILTER1", "QPIRxMask1": "Q_Py_PCI_RX_PMON_BOX_MASK1", "QPIRxMask0": "Q_Py_PCI_RX_PMON_BOX_MASK0", "HA_OpcodeMatch": "HAn_PCI_PMON_BOX_OPCODEMATCH", "QPIRxMatch1": "Q_Py_PCI_RX_PMON_BOX_MATCH1", "QPITxMask1": "Q_Py_PCI_TX_PMON_BOX_MASK1", "CBoFilter0": "Cn_MSR_PMON_BOX_FILTER", "QPIRxMatch0": "Q_Py_PCI_RX_PMON_BOX_MATCH0", "QPITxMask0": "Q_Py_PCI_TX_PMON_BOX_MASK0", "HA_AddrMatch1": "HAn_PCI_PMON_BOX_ADDRMATCH1", "QPITxMatch0": "Q_Py_PCI_TX_PMON_BOX_MATCH0", } events = { # CBO: "CBO.BOUNCE_CONTROL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Bounce Control", "EvSel": 10, "ExtSel": "", }, "CBO.CLOCKTICKS": { "Box": "CBO", "Category": "CBO UCLK Events", "Counters": "0-3", "Desc": "Uncore Clocks", "EvSel": 0, "ExtSel": "", }, "CBO.COUNTER0_OCCUPANCY": { "Box": "CBO", "Category": "CBO OCCUPANCY Events", "Counters": "0-3", "Defn": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.", "Desc": "Counter 0 Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.FAST_ASSERTED": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles either the local distress or incoming distress signals are asserted. 
Incoming distress includes both up and dn.", "Desc": "FaST wire asserted", "EvSel": 9, "ExtSel": "", }, "CBO.LLC_LOOKUP": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", }, "CBO.LLC_LOOKUP.ANY": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. 
The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00010001", }, "CBO.LLC_LOOKUP.DATA_READ": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00000011", }, "CBO.LLC_LOOKUP.WRITE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. 
CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00000101", }, "CBO.LLC_LOOKUP.REMOTE_SNOOP": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00001001", }, "CBO.LLC_LOOKUP.NID": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. 
This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b01000001", }, "CBO.LLC_LOOKUP.READ": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. 
To monitor any lookup, set the field to 0x1F.", "Umask": "b00100001", }, "CBO.LLC_VICTIMS": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", }, "CBO.LLC_VICTIMS.NID": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.LLC_VICTIMS.E_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.LLC_VICTIMS.I_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.LLC_VICTIMS.F_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.LLC_VICTIMS.MISS": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.LLC_VICTIMS.M_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.MISC": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", }, "CBO.MISC.CVZERO_PREFETCH_MISS": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.MISC.CVZERO_PREFETCH_VICTIM": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.MISC.RSPI_WAS_FSE": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.MISC.RFO_HIT_S": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.MISC.WC_ALIASING": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.MISC.STARTED": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RING_AD_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "CBO.RING_AD_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "CBO.RING_AD_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "CBO.RING_AD_USED.CCW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_AD_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "CBO.RING_AD_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. 
On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "CBO.RING_AD_USED.ALL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_AD_USED.CW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_AK_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "CBO.RING_AK_USED.ALL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_AK_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "CBO.RING_AK_USED.CW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_AK_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "CBO.RING_AK_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "CBO.RING_AK_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "CBO.RING_AK_USED.CCW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_BL_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "CBO.RING_BL_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "CBO.RING_BL_USED.CCW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_BL_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "CBO.RING_BL_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "CBO.RING_BL_USED.ALL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_BL_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "CBO.RING_BL_USED.CW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_BOUNCES": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", }, "CBO.RING_BOUNCES.AK": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RING_BOUNCES.AD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RING_BOUNCES.IV": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RING_BOUNCES.BL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RING_IV_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is 
being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", }, "CBO.RING_IV_USED.UP": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00000011", }, "CBO.RING_IV_USED.DN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. 
To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00001100", }, "CBO.RING_IV_USED.DOWN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b11001100", }, "CBO.RING_IV_USED.ANY": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00001111", }, "CBO.RING_SRC_THRTL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-3", "Desc": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic.", "EvSel": 7, "ExtSel": "", }, "CBO.RxR_EXT_STARVED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", }, "CBO.RxR_EXT_STARVED.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_EXT_STARVED.ISMQ_BIDS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_EXT_STARVED.PRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_EXT_STARVED.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts cycles in external starvation. 
This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_INSERTS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", }, "CBO.RxR_INSERTS.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxxx1", }, "CBO.RxR_INSERTS.PRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxx1xxxx", }, "CBO.RxR_INSERTS.IRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxx1x", }, "CBO.RxR_INSERTS.PRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxx1xxxxx", }, "CBO.RxR_INSERTS.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", 
"EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxx1xx", }, "CBO.RxR_IPQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", }, "CBO.RxR_IPQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_IPQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_IPQ_RETRY.ADDR_CONFLICT": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_IPQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IPQ_RETRY2": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. 
Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 40, "ExtSel": "", }, "CBO.RxR_IPQ_RETRY2.TARGET": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 40, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_IPQ_RETRY2.AD_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", }, "CBO.RxR_IRQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_IRQ_RETRY.IIO_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.RxR_IRQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_IRQ_RETRY.NID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_IRQ_RETRY.ADDR_CONFLICT": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_IRQ_RETRY.RTID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", 
"Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_IRQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY2": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", }, "CBO.RxR_IRQ_RETRY2.BL_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_IRQ_RETRY2.AD_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY2.TARGET": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Ingress Request Queue Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_ISMQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", }, "CBO.RxR_ISMQ_RETRY.IIO_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.RxR_ISMQ_RETRY.WB_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CBO.RxR_ISMQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_ISMQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_ISMQ_RETRY.NID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_ISMQ_RETRY.RTID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_ISMQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_ISMQ_RETRY2": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", }, "CBO.RxR_ISMQ_RETRY2.TARGET": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.RxR_ISMQ_RETRY2.BL_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_ISMQ_RETRY2.AD_SBO": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-3", "Desc": "ISMQ Request Queue Rejects", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_OCCUPANCY": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress 
Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, }, "CBO.RxR_OCCUPANCY.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000001", }, "CBO.RxR_OCCUPANCY.IRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000010", }, "CBO.RxR_OCCUPANCY.PRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00100000", }, "CBO.RxR_OCCUPANCY.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000100", }, "CBO.SBO_CREDITS_ACQUIRED": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo credits acquired in a given cycle, per ring. 
Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Acquired", "EvSel": 61, "ExtSel": "", }, "CBO.SBO_CREDITS_ACQUIRED.BL": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Acquired", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.SBO_CREDITS_ACQUIRED.AD": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Acquired", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.SBO_CREDIT_OCCUPANCY": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": 0, "Defn": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Occupancy", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 7, "Notes": "Each Cbo has 3 AD and 2 BL credits into its assigned Sbo.", "SubCtr": 1, }, "CBO.SBO_CREDIT_OCCUPANCY.BL": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": 0, "Defn": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Occupancy", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 7, "Notes": "Each Cbo has 3 AD and 2 BL credits into its assigned Sbo.", "SubCtr": 1, "Umask": "bxxxxxx1x", }, "CBO.SBO_CREDIT_OCCUPANCY.AD": { "Box": "CBO", "Category": "CBO SBO Credit Events", "Counters": 0, "Defn": "Number of Sbo credits in use in a given cycle, per ring. 
Each Cbo is assigned an Sbo it can communicate with.", "Desc": "SBo Credits Occupancy", "EvSel": 62, "ExtSel": "", "MaxIncCyc": 7, "Notes": "Each Cbo has 3 AD and 2 BL credits into its assigned Sbo.", "SubCtr": 1, "Umask": "bxxxxxxx1", }, "CBO.TOR_INSERTS": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", }, "CBO.TOR_INSERTS.NID_EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000100", }, "CBO.TOR_INSERTS.MISS_REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10001010", }, "CBO.TOR_INSERTS.NID_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01001000", }, "CBO.TOR_INSERTS.LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00100001", }, "CBO.TOR_INSERTS.MISS_LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00101010", }, "CBO.TOR_INSERTS.LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00101000", }, "CBO.TOR_INSERTS.OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000001", }, "CBO.TOR_INSERTS.REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10000001", }, "CBO.TOR_INSERTS.REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10001000", }, "CBO.TOR_INSERTS.MISS_REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10000011", }, "CBO.TOR_INSERTS.NID_MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01001010", }, "CBO.TOR_INSERTS.NID_WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01010000", }, "CBO.TOR_INSERTS.MISS_LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00100011", }, "CBO.TOR_INSERTS.NID_MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000011", }, "CBO.TOR_INSERTS.NID_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000001", }, "CBO.TOR_INSERTS.WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00010000", }, "CBO.TOR_INSERTS.ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00001000", }, "CBO.TOR_INSERTS.EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000100", }, "CBO.TOR_INSERTS.MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000011", }, "CBO.TOR_OCCUPANCY": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.TOR_OCCUPANCY.MISS_REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10000011", }, "CBO.TOR_OCCUPANCY.NID_WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01010000", }, "CBO.TOR_OCCUPANCY.NID_MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01001010", }, "CBO.TOR_OCCUPANCY.WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00010000", }, "CBO.TOR_OCCUPANCY.ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00001000", }, "CBO.TOR_OCCUPANCY.MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000011", }, "CBO.TOR_OCCUPANCY.EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000100", }, "CBO.TOR_OCCUPANCY.NID_MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000011", }, "CBO.TOR_OCCUPANCY.MISS_LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00100011", }, "CBO.TOR_OCCUPANCY.NID_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000001", }, "CBO.TOR_OCCUPANCY.MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00001010", }, "CBO.TOR_OCCUPANCY.LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00100001", }, "CBO.TOR_OCCUPANCY.NID_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01001000", }, "CBO.TOR_OCCUPANCY.NID_EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000100", }, "CBO.TOR_OCCUPANCY.MISS_REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10001010", }, "CBO.TOR_OCCUPANCY.LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00101000", }, "CBO.TOR_OCCUPANCY.REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10001000", }, "CBO.TOR_OCCUPANCY.REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10000001", }, "CBO.TOR_OCCUPANCY.OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000001", }, "CBO.TOR_OCCUPANCY.MISS_LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00101010", }, "CBO.TxR_ADS_USED": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", }, "CBO.TxR_ADS_USED.AD": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.TxR_ADS_USED.BL": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.TxR_ADS_USED.AK": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.TxR_INSERTS": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", }, "CBO.TxR_INSERTS.BL_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.TxR_INSERTS.IV_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.TxR_INSERTS.AD_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.TxR_INSERTS.AK_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.TxR_INSERTS.AK_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.TxR_INSERTS.AD_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.TxR_INSERTS.BL_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bx1xxxxxx", }, # R3QPI: "R3QPI.CLOCKTICKS": { "Box": "R3QPI", "Category": "R3QPI UCLK Events", "Counters": "0-2", "Defn": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Number of uclks in domain", "EvSel": 1, "ExtSel": "", }, "R3QPI.C_HI_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO8": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO10": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO14_16": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bx1xxxxxx", 
}, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO13": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO_15_17": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "b1xxxxxxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO11": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO12": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO9": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 31, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.C_LO_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO4": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxx1xxxx", }, 
"R3QPI.C_LO_AD_CREDITS_EMPTY.CBO2": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO7": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "b1xxxxxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO6": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO1": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO3": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO0": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO5": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxx1xxxxx", }, 
"R3QPI.HA_R2_BL_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 45, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.HA1": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 45, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxxxx1x", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.R2_NCB": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 45, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxxx1xx", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.HA0": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 45, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxxxxx1", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.R2_NCS": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 45, 
"ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxx1xxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN0_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN0_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, 
"ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN0_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.QPI0_BL_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 46, "ExtSel": "", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to 
QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI1_BL_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN0_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN0_NDR": { "Box": "R3QPI", "Category": 
"R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN0_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RING_AD_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R3QPI.RING_AD_USED.CW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R3QPI.RING_AD_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R3QPI.RING_AD_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R3QPI.RING_AD_USED.ALL": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "R3QPI.RING_AD_USED.CCW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R3QPI.RING_AD_USED.CW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R3QPI.RING_AD_USED.CCW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R3QPI.RING_AK_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R3QPI.RING_AK_USED.CCW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R3QPI.RING_AK_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R3QPI.RING_AK_USED.CCW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R3QPI.RING_AK_USED.ALL": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "R3QPI.RING_AK_USED.CW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R3QPI.RING_AK_USED.CW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R3QPI.RING_AK_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R3QPI.RING_BL_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R3QPI.RING_BL_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R3QPI.RING_BL_USED.CW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R3QPI.RING_BL_USED.ALL": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "R3QPI.RING_BL_USED.CW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R3QPI.RING_BL_USED.CCW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R3QPI.RING_BL_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R3QPI.RING_BL_USED.CCW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R3QPI.RING_IV_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", }, "R3QPI.RING_IV_USED.ANY": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00001111", }, "R3QPI.RING_IV_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00000011", }, "R3QPI.RING_IV_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b11001100", }, "R3QPI.RING_SINK_STARVED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Number of cycles the ringstop is in starvation (per ring)", "Desc": "Ring Stop Starved", "EvSel": 14, "ExtSel": "", "MaxIncCyc": 2, }, "R3QPI.RING_SINK_STARVED.AK": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Number of cycles the ringstop is in starvation (per ring)", "Desc": "Ring Stop Starved", "EvSel": 14, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "R3QPI.RxR_CYCLES_NE": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", }, "R3QPI.RxR_CYCLES_NE.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.RxR_CYCLES_NE.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.RxR_CYCLES_NE.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RxR_CYCLES_NE_VN1": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", }, "R3QPI.RxR_CYCLES_NE_VN1.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.RxR_CYCLES_NE_VN1.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.RxR_CYCLES_NE_VN1.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RxR_CYCLES_NE_VN1.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.RxR_CYCLES_NE_VN1.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.RxR_CYCLES_NE_VN1.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Cycles Not Empty", "EvSel": 20, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.RxR_INSERTS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", }, "R3QPI.RxR_INSERTS.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. 
This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.RxR_INSERTS.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.RxR_INSERTS.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.RxR_INSERTS.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RxR_INSERTS.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.RxR_INSERTS.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.RxR_INSERTS_VN1": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", }, "R3QPI.RxR_INSERTS_VN1.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.RxR_INSERTS_VN1.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.RxR_INSERTS_VN1.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.RxR_INSERTS_VN1.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RxR_INSERTS_VN1.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.RxR_INSERTS_VN1.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.RxR_OCCUPANCY_VN1": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, }, "R3QPI.RxR_OCCUPANCY_VN1.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. 
This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxxxx1xxx", }, "R3QPI.RxR_OCCUPANCY_VN1.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxxxxxxx1", }, "R3QPI.RxR_OCCUPANCY_VN1.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxxx1xxxx", }, "R3QPI.RxR_OCCUPANCY_VN1.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxxxxx1xx", }, "R3QPI.RxR_OCCUPANCY_VN1.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxx1xxxxx", }, "R3QPI.RxR_OCCUPANCY_VN1.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "Notes": "Supposed to be 0x16", "SubCtr": 1, "Umask": "bxxxxxx1x", }, "R3QPI.SBO0_CREDITS_ACQUIRED": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, }, "R3QPI.SBO0_CREDITS_ACQUIRED.BL": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "R3QPI.SBO0_CREDITS_ACQUIRED.AD": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "R3QPI.SBO1_CREDITS_ACQUIRED": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 41, "ExtSel": "", "MaxIncCyc": 2, }, "R3QPI.SBO1_CREDITS_ACQUIRED.AD": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 41, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "R3QPI.SBO1_CREDITS_ACQUIRED.BL": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 41, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, 
"R3QPI.STALL_NO_SBO_CREDIT": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, }, "R3QPI.STALL_NO_SBO_CREDIT.SBO1_BL": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00001000", }, "R3QPI.STALL_NO_SBO_CREDIT.SBO0_AD": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000001", }, "R3QPI.STALL_NO_SBO_CREDIT.SBO1_AD": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000010", }, "R3QPI.STALL_NO_SBO_CREDIT.SBO0_BL": { "Box": "R3QPI", "Category": "R3QPI SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. 
Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000100", }, "R3QPI.TxR_NACK": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", }, "R3QPI.TxR_NACK.DN_AD": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.TxR_NACK.UP_AK": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.TxR_NACK.UP_AD": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.TxR_NACK.DN_AK": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.TxR_NACK.DN_BL": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.TxR_NACK.UP_BL": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN0_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. 
This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", }, "R3QPI.VN0_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN0_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN0_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN0_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN0_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN0_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN0_CREDITS_USED": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", }, "R3QPI.VN0_CREDITS_USED.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN0_CREDITS_USED.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN0_CREDITS_USED.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN0_CREDITS_USED.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN0_CREDITS_USED.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN0_CREDITS_USED.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN1_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", }, "R3QPI.VN1_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN1_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN1_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN1_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN1_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN1_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN1_CREDITS_USED": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", }, "R3QPI.VN1_CREDITS_USED.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN1_CREDITS_USED.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN1_CREDITS_USED.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN1_CREDITS_USED.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN1_CREDITS_USED.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN1_CREDITS_USED.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VNA_CREDITS_ACQUIRED": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. 
One can count the number of packets transfered in a given message class using an qfclk event.", "Desc": "VNA credit Acquisitions", "EvSel": 51, "ExtSel": "", "MaxIncCyc": 4, }, "R3QPI.VNA_CREDITS_ACQUIRED.BL": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transfered in a given message class using an qfclk event.", "Desc": "VNA credit Acquisitions", "EvSel": 51, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "R3QPI.VNA_CREDITS_ACQUIRED.AD": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. 
One can count the number of packets transfered in a given message class using an qfclk event.", "Desc": "VNA credit Acquisitions", "EvSel": 51, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "R3QPI.VNA_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", }, "R3QPI.VNA_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VNA_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VNA_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VNA_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VNA_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VNA_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxxxx1", }, # QPI_LL: "QPI_LL.CLOCKTICKS": { "Box": "QPI_LL", "Category": "QPI_LL CFCLK Events", "Counters": "0-3", "Defn": "Counts the number of clocks in the QPI LL. This clock runs at 1/4th the \"GT/s\" speed of the QPI link. For example, a 4GT/s link will have qfclk or 1GHz. BDX does not support dynamic link speeds, so this frequency is fixed.", "Desc": "Number of qfclks", "EvSel": 20, "ExtSel": "", }, "QPI_LL.CTO_COUNT": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Counters": "0-3", "Defn": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. 
You can use edge detect to count the number of cases when both events triggered.", "Desc": "Count of CTO Events", "EvSel": 56, "Filter": "QPIMask0[17:0],QPIMatch0[17:0],QPIMask1[19:16],QPIMatch1[19:16]", "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, }, "QPI_LL.DIRECT2CORE": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_RBT_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "b1xxxxxxx", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.DIRECT2CORE.FAILURE_RBT_HIT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxx1xx", }, "QPI_LL.DIRECT2CORE.FAILURE_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_RBT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "QPI_LL.DIRECT2CORE.SUCCESS_RBT_HIT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "QPI_LL.DIRECT2CORE.FAILURE_RBT_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bx1xxxxxx", }, "QPI_LL.L1_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.", "Desc": "Cycles in L1", "EvSel": 18, "ExtSel": "", }, "QPI_LL.RxL0P_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p", "EvSel": 16, "ExtSel": "", "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "QPI_LL.RxL0_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. 
Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0", "EvSel": 15, "ExtSel": "", "Notes": "Includes L0p cycles. To get just L0, subtract RxL0P_POWER_CYCLES", }, "QPI_LL.RxL_BYPASSED": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "Rx Flit Buffer Bypassed", "EvSel": 9, "ExtSel": "", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.DRS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.HOM": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.SNP": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NDR": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxx1xxxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NCB": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NCS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.NCS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.NCB": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.NDR": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxx1xxxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_CREDITS_CONSUMED_VNA": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VNA Credit Consumed", "EvSel": 29, "ExtSel": "", }, "QPI_LL.RxL_CYCLES_NE": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.", "Desc": "RxQ Cycles Not Empty", "EvSel": 10, "ExtSel": "", }, "QPI_LL.RxL_FLITS_G1": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_FLITS_G1.HOM_NONREQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.RxL_FLITS_G1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.RxL_FLITS_G1.HOM_REQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.RxL_FLITS_G1.DRS_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.RxL_FLITS_G1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00011000", }, "QPI_LL.RxL_FLITS_G1.DRS_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.RxL_FLITS_G1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000110", }, "QPI_LL.RxL_FLITS_G2": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_FLITS_G2.NCB_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.RxL_FLITS_G2.NDR_AK": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.RxL_FLITS_G2.NCB": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001100", }, "QPI_LL.RxL_FLITS_G2.NDR_AD": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.RxL_FLITS_G2.NCS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. 
Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.RxL_FLITS_G2.NCB_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.RxL_INSERTS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "Rx Flit Buffer Allocations", "EvSel": 8, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_DRS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.", "Desc": "Rx Flit Buffer Allocations - DRS", "EvSel": 9, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_DRS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.", "Desc": "Rx Flit Buffer Allocations - DRS", "EvSel": 9, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_DRS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.", "Desc": "Rx Flit Buffer Allocations - DRS", "EvSel": 9, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_HOM": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.", "Desc": "Rx Flit Buffer Allocations - HOM", "EvSel": 12, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_HOM.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.", "Desc": "Rx Flit Buffer Allocations - HOM", "EvSel": 12, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_HOM.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.", "Desc": "Rx Flit Buffer Allocations - HOM", "EvSel": 12, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NCB": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.", "Desc": "Rx Flit Buffer Allocations - NCB", "EvSel": 10, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_NCB.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.", "Desc": "Rx Flit Buffer Allocations - NCB", "EvSel": 10, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NCB.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.", "Desc": "Rx Flit Buffer Allocations - NCB", "EvSel": 10, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_NCS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.", "Desc": "Rx Flit Buffer Allocations - NCS", "EvSel": 11, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_NCS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.", "Desc": "Rx Flit Buffer Allocations - NCS", "EvSel": 11, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_NCS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.", "Desc": "Rx Flit Buffer Allocations - NCS", "EvSel": 11, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NDR": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.", "Desc": "Rx Flit Buffer Allocations - NDR", "EvSel": 14, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_NDR.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.", "Desc": "Rx Flit Buffer Allocations - NDR", "EvSel": 14, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NDR.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.", "Desc": "Rx Flit Buffer Allocations - NDR", "EvSel": 14, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_SNP": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.", "Desc": "Rx Flit Buffer Allocations - SNP", "EvSel": 13, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_SNP.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.", "Desc": "Rx Flit Buffer Allocations - SNP", "EvSel": 13, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_SNP.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.", "Desc": "Rx Flit Buffer Allocations - SNP", "EvSel": 13, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 11, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_DRS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors DRS flits only.", "Desc": "RxQ Occupancy - DRS", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_DRS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.", "Desc": "RxQ Occupancy - DRS", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_DRS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.", "Desc": "RxQ Occupancy - DRS", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_HOM": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.", "Desc": "RxQ Occupancy - HOM", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_HOM.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.", "Desc": "RxQ Occupancy - HOM", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_HOM.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors HOM flits only.", "Desc": "RxQ Occupancy - HOM", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_NCB": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.", "Desc": "RxQ Occupancy - NCB", "EvSel": 22, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_NCB.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.", "Desc": "RxQ Occupancy - NCB", "EvSel": 22, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_NCB.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.", "Desc": "RxQ Occupancy - NCB", "EvSel": 22, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_NCS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.", "Desc": "RxQ Occupancy - NCS", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_NCS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors NCS flits only.", "Desc": "RxQ Occupancy - NCS", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_NCS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.", "Desc": "RxQ Occupancy - NCS", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_NDR": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.", "Desc": "RxQ Occupancy - NDR", "EvSel": 26, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_NDR.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.", "Desc": "RxQ Occupancy - NDR", "EvSel": 26, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_NDR.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.", "Desc": "RxQ Occupancy - NDR", "EvSel": 26, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_SNP": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors SNP flits only.", "Desc": "RxQ Occupancy - SNP", "EvSel": 25, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_SNP.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.", "Desc": "RxQ Occupancy - SNP", "EvSel": 25, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_SNP.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.", "Desc": "RxQ Occupancy - SNP", "EvSel": 25, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.TxL0P_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. 
This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p", "EvSel": 13, "ExtSel": "", "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "QPI_LL.TxL0_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0", "EvSel": 12, "ExtSel": "", "Notes": "Includes L0p cycles. To get just L0, subtract TxL0P_POWER_CYCLES", }, "QPI_LL.TxL_BYPASSED": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.", "Desc": "Tx Flit Buffer Bypassed", "EvSel": 5, "ExtSel": "", }, "QPI_LL.TxL_CYCLES_NE": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. 
However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.", "Desc": "Tx Flit Buffer Cycles not Empty", "EvSel": 6, "ExtSel": "", }, "QPI_LL.TxL_FLITS_G0": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.TxL_FLITS_G0.DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). 
In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.TxL_FLITS_G0.NON_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.TxL_FLITS_G1": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.TxL_FLITS_G1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00011000", }, "QPI_LL.TxL_FLITS_G1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000110", }, "QPI_LL.TxL_FLITS_G1.DRS_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.TxL_FLITS_G1.HOM_REQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.TxL_FLITS_G1.DRS_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.TxL_FLITS_G1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.TxL_FLITS_G1.HOM_NONREQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.TxL_FLITS_G2": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.TxL_FLITS_G2.NCB_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.TxL_FLITS_G2.NCB_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.TxL_FLITS_G2.NDR_AK": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.TxL_FLITS_G2.NCB": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001100", }, "QPI_LL.TxL_FLITS_G2.NDR_AD": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.TxL_FLITS_G2.NCS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.TxL_INSERTS": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. 
This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "Tx Flit Buffer Allocations", "EvSel": 4, "ExtSel": "", }, "QPI_LL.TxL_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.", "Desc": "Tx Flit Buffer Occupancy", "EvSel": 7, "ExtSel": "", }, "QPI_LL.TxR_AD_HOM_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - HOM", "EvSel": 38, "ExtSel": "", }, "QPI_LL.TxR_AD_HOM_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - HOM", "EvSel": 38, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_AD_HOM_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. 
Flow Control FIFO for Home messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - HOM", "EvSel": 38, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_AD_HOM_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD HOM", "EvSel": 34, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, }, "QPI_LL.TxR_AD_HOM_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD HOM", "EvSel": 34, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_AD_HOM_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD HOM", "EvSel": 34, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_AD_NDR_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. 
Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 40, "ExtSel": "", }, "QPI_LL.TxR_AD_NDR_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 40, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_AD_NDR_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 40, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_AD_NDR_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 36, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, }, "QPI_LL.TxR_AD_NDR_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. 
Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 36, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_AD_NDR_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 36, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_AD_SNP_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - SNP", "EvSel": 39, "ExtSel": "", }, "QPI_LL.TxR_AD_SNP_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - SNP", "EvSel": 39, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_AD_SNP_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - SNP", "EvSel": 39, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_AD_SNP_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. 
Flow Control FIFO fro Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD SNP", "EvSel": 35, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, }, "QPI_LL.TxR_AD_SNP_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO fro Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD SNP", "EvSel": 35, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_AD_SNP_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO fro Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD SNP", "EvSel": 35, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_AK_NDR_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.", "Desc": "R3QPI Egress Credit Occupancy - AK NDR", "EvSel": 41, "ExtSel": "", }, "QPI_LL.TxR_AK_NDR_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. 
Local NDR message class to AK Egress.", "Desc": "R3QPI Egress Credit Occupancy - AK NDR", "EvSel": 37, "ExtSel": "", "MaxIncCyc": 6, "SubCtr": 1, }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", "Umask": "b00000100", }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. 
DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000100", }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCB_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCB", "EvSel": 43, "ExtSel": "", }, "QPI_LL.TxR_BL_NCB_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. 
NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCB", "EvSel": 43, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCB_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCB", "EvSel": 43, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCB_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCB", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, }, "QPI_LL.TxR_BL_NCB_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCB", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCB_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCB", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCS_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. 
NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCS", "EvSel": 44, "ExtSel": "", }, "QPI_LL.TxR_BL_NCS_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCS", "EvSel": 44, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCS_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCS", "EvSel": 44, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCS_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCS", "EvSel": 33, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, }, "QPI_LL.TxR_BL_NCS_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCS", "EvSel": 33, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCS_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. 
NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCS", "EvSel": 33, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.VNA_CREDIT_RETURNS": { "Box": "QPI_LL", "Category": "QPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "Defn": "Number of VNA credits returned.", "Desc": "VNA Credits Returned", "EvSel": 28, "ExtSel": "", }, "QPI_LL.VNA_CREDIT_RETURN_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "Defn": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.", "Desc": "VNA Credits Pending Return - Occupancy", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, # HA: "HA.ADDR_OPC_MATCH": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", }, "HA.ADDR_OPC_MATCH.OPC": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.ADDR_OPC_MATCH.AK": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.ADDR_OPC_MATCH.BL": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.ADDR_OPC_MATCH.FILT": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "b00000011", }, "HA.ADDR_OPC_MATCH.ADDR": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.ADDR_OPC_MATCH.AD": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": 
"", "Umask": "bxxxxx1xx", }, "HA.BT_CYCLES_NE": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.", "Desc": "BT Cycles Not Empty", "EvSel": 66, "ExtSel": "", "Notes": "Will not count case HT is empty and a Bypass happens.", }, "HA.BT_OCCUPANCY": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "BT Occupancy", "EvSel": 67, "ExtSel": "", "MaxIncCyc": 512, }, "HA.BYPASS_IMC": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", }, "HA.BYPASS_IMC.NOT_TAKEN": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. 
This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", "Umask": "bxxxxxx1x", }, "HA.BYPASS_IMC.TAKEN": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", "Umask": "bxxxxxxx1", }, "HA.CLOCKTICKS": { "Box": "HA", "Category": "HA UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.", "Desc": "uclks", "EvSel": 0, "ExtSel": "", }, "HA.CONFLICT_CYCLES": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": 1, "Defn": "Counters the number of cycles there was a conflict in the HA because threads in two different sockets were requesting the same address at the same time", "Desc": "Conflict Checks", "EvSel": 11, "Filter": "N", "ExtSel": "", }, "HA.DIRECT2CORE_COUNT": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of Direct2Core messages sent", "Desc": "Direct2Core Messages Sent", "EvSel": 17, "ExtSel": "", "Notes": "Will not be implemented since OUTBOUND_TX_BL:0x1 will count DRS to CORE which is effectively the same thing as D2C count", }, "HA.DIRECT2CORE_CYCLES_DISABLED": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of cycles in which Direct2Core was disabled", "Desc": "Cycles when Direct2Core was Disabled", "EvSel": 18, 
"ExtSel": "", }, "HA.DIRECT2CORE_TXN_OVERRIDE": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of Reads where Direct2Core overridden", "Desc": "Number of Reads that had Direct2Core Overridden", "EvSel": 19, "ExtSel": "", }, "HA.DIRECTORY_LAT_OPT": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Directory Latency Optimization Data Return Path Taken. When directory mode is enabled and the directory retuned for a read is Dir=I, then data can be returned using a faster path if certain conditions are met (credits, free pipeline, etc).", "Desc": "Directory Lat Opt Return", "EvSel": 65, "ExtSel": "", }, "HA.DIRECTORY_LOOKUP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "HA.DIRECTORY_LOOKUP.SNP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "HA.DIRECTORY_LOOKUP.NO_SNP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "HA.DIRECTORY_UPDATE": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. 
These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "HA.DIRECTORY_UPDATE.SET": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "HA.DIRECTORY_UPDATE.CLEAR": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "HA.DIRECTORY_UPDATE.ANY": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. 
This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx11", }, "HA.HITME_HIT": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", }, "HA.HITME_HIT.ACKCNFLTWBI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.HITME_HIT.WBMTOE_OR_S": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.HITME_HIT.EVICTS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b01000010", }, "HA.HITME_HIT.HOM": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b00001111", }, "HA.HITME_HIT.ALLOCS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b01110000", }, "HA.HITME_HIT.RSPFWDI_REMOTE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.HITME_HIT.WBMTOI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.HITME_HIT.RSP": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.HITME_HIT.READ_OR_INVITOE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": 
"Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.HITME_HIT.RSPFWDS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.HITME_HIT.INVALS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b00100110", }, "HA.HITME_HIT.RSPFWDI_LOCAL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.HITME_HIT.ALL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 113, "ExtSel": "", "Umask": "b11111111", }, "HA.HITME_HIT_PV_BITS_SET": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", }, "HA.HITME_HIT_PV_BITS_SET.READ_OR_INVITOE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.HITME_HIT_PV_BITS_SET.RSP": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.HITME_HIT_PV_BITS_SET.WBMTOI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.HITME_HIT_PV_BITS_SET.RSPFWDS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.HITME_HIT_PV_BITS_SET.ALL": { "Box": "HA", "Category": "HA HitME Events", 
"Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "b11111111", }, "HA.HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.HITME_HIT_PV_BITS_SET.HOM": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "b00001111", }, "HA.HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.HITME_HIT_PV_BITS_SET.ACKCNFLTWBI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.HITME_HIT_PV_BITS_SET.WBMTOE_OR_S": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Accumulates Number of PV bits set on HitMe Cache Hits", "EvSel": 114, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.HITME_LOOKUP": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", }, "HA.HITME_LOOKUP.ACKCNFLTWBI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.HITME_LOOKUP.WBMTOE_OR_S": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.HITME_LOOKUP.RSPFWDI_LOCAL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", 
"EvSel": 112, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.HITME_LOOKUP.ALL": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b11111111", }, "HA.HITME_LOOKUP.RSPFWDS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.HITME_LOOKUP.INVALS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b00100110", }, "HA.HITME_LOOKUP.WBMTOI": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.HITME_LOOKUP.RSP": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.HITME_LOOKUP.READ_OR_INVITOE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.HITME_LOOKUP.ALLOCS": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b01110000", }, "HA.HITME_LOOKUP.RSPFWDI_REMOTE": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.HITME_LOOKUP.HOM": { "Box": "HA", "Category": "HA HitME Events", "Counters": "0-3", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 112, "ExtSel": "", "Umask": "b00001111", }, "HA.IGR_NO_CREDIT_CYCLES": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts 
the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI0": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI1": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI1": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI2": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. 
This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI2": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI0": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.IMC_READS": { "Box": "HA", "Category": "HA IMC_READS Events", "Counters": "0-3", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Normal Priority Reads Issued", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 4, "Notes": "Does not count reads using the bypass path. That is counted separately in HA_IMC.BYPASS", }, "HA.IMC_READS.NORMAL": { "Box": "HA", "Category": "HA IMC_READS Events", "Counters": "0-3", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Normal Priority Reads Issued", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 4, "Notes": "Does not count reads using the bypass path. 
That is counted separately in HA_IMC.BYPASS", "Umask": "b00000001", }, "HA.IMC_RETRY": { "Box": "HA", "Category": "HA IMC_MISC Events", "Counters": "0-3", "Desc": "Retry Events", "EvSel": 30, "ExtSel": "", }, "HA.IMC_WRITES": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", }, "HA.IMC_WRITES.PARTIAL_ISOCH": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.IMC_WRITES.FULL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.IMC_WRITES.PARTIAL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.IMC_WRITES.ALL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. 
It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "b00001111", }, "HA.IMC_WRITES.FULL_ISOCH": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.OSB": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", }, "HA.OSB.REMOTE_USEFUL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.OSB.REMOTE": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.OSB.READS_LOCAL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.OSB.READS_LOCAL_USEFUL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.OSB.CANCELLED": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.OSB.INVITOE_LOCAL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.OSB_EDR": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", }, "HA.OSB_EDR.READS_LOCAL_I": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.OSB_EDR.READS_REMOTE_I": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.OSB_EDR.READS_REMOTE_S": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.OSB_EDR.READS_LOCAL_S": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.OSB_EDR.ALL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts 
the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.REQUESTS": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", }, "HA.REQUESTS.INVITOE_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.REQUESTS.WRITES_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.REQUESTS.READS": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "b00000011", }, "HA.REQUESTS.READS_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.REQUESTS.READS_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.REQUESTS.WRITES": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "b00001100", }, "HA.REQUESTS.INVITOE_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.REQUESTS.WRITES_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.RING_AD_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_AD_USED.CCW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_AD_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "HA.RING_AD_USED.CCW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_AD_USED.CW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_AD_USED.CW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_AD_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "HA.RING_AK_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_AK_USED.CCW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_AK_USED.CCW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_AK_USED.ALL": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "HA.RING_AK_USED.CW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_AK_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "HA.RING_AK_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "HA.RING_AK_USED.CW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_BL_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_BL_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "HA.RING_BL_USED.ALL": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "HA.RING_BL_USED.CW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_BL_USED.CCW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_BL_USED.CCW_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_BL_USED.CW_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_BL_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "HA.RPQ_CYCLES_NO_REG_CREDITS": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN3": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00001000", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN0": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000001", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN1": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000010", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN2": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000100", }, "HA.SBO0_CREDITS_ACQUIRED": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 104, "ExtSel": "", }, "HA.SBO0_CREDITS_ACQUIRED.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 104, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO0_CREDITS_ACQUIRED.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 104, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SBO0_CREDIT_OCCUPANCY": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits in use in a given cycle, per ring.", "Desc": "SBo0 Credits Occupancy", "EvSel": 106, "ExtSel": "", }, "HA.SBO0_CREDIT_OCCUPANCY.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits in use in a given cycle, per ring.", "Desc": "SBo0 Credits Occupancy", "EvSel": 106, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SBO0_CREDIT_OCCUPANCY.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 0 credits in use in a given cycle, per ring.", "Desc": "SBo0 Credits Occupancy", "EvSel": 106, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO1_CREDITS_ACQUIRED": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 105, "ExtSel": "", }, "HA.SBO1_CREDITS_ACQUIRED.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": 
"0-3", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 105, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO1_CREDITS_ACQUIRED.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits acquired in a given cycle, per ring.", "Desc": "SBo1 Credits Acquired", "EvSel": 105, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SBO1_CREDIT_OCCUPANCY": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits in use in a given cycle, per ring.", "Desc": "SBo1 Credits Occupancy", "EvSel": 107, "ExtSel": "", }, "HA.SBO1_CREDIT_OCCUPANCY.AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits in use in a given cycle, per ring.", "Desc": "SBo1 Credits Occupancy", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SBO1_CREDIT_OCCUPANCY.BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of Sbo 1 credits in use in a given cycle, per ring.", "Desc": "SBo1 Credits Occupancy", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNOOPS_RSP_AFTER_DATA": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts the number of reads when the snoop was on the critical path to the data return.", "Desc": "Data beat the Snoop Responses", "EvSel": 10, "ExtSel": "", "MaxIncCyc": 127, }, "HA.SNOOPS_RSP_AFTER_DATA.REMOTE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts the number of reads when the snoop was on the critical path to the data return.", "Desc": "Data beat the Snoop Responses", "EvSel": 10, "ExtSel": "", "MaxIncCyc": 127, "Umask": "b00000010", }, "HA.SNOOPS_RSP_AFTER_DATA.LOCAL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts the number of reads when the snoop was on the critical path to the data return.", "Desc": "Data beat the Snoop 
Responses", "EvSel": 10, "ExtSel": "", "MaxIncCyc": 127, "Umask": "b00000001", }, "HA.SNOOP_CYCLES_NE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", }, "HA.SNOOP_CYCLES_NE.REMOTE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNOOP_CYCLES_NE.ALL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", "Umask": "b00000011", }, "HA.SNOOP_CYCLES_NE.LOCAL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Counts cycles when one or more snoops are outstanding.", "Desc": "Cycles with Snoops Outstanding", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNOOP_OCCUPANCY": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.", "Desc": "Tracker Snoops Outstanding Accumulator", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, }, "HA.SNOOP_OCCUPANCY.LOCAL": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. 
This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.", "Desc": "Tracker Snoops Outstanding Accumulator", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000001", }, "HA.SNOOP_OCCUPANCY.REMOTE": { "Box": "HA", "Category": "HA SNOOPS Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.", "Desc": "Tracker Snoops Outstanding Accumulator", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000010", }, "HA.SNOOP_RESP": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", }, "HA.SNOOP_RESP.RSPSFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. 
Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.SNOOP_RESP.RSPS": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNOOP_RESP.RSPIFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.SNOOP_RESP.RSP_FWD_WB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. 
In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.SNOOP_RESP.RSPI": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNOOP_RESP.RSP_WB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.SNOOP_RESP.RSPCNFLCT": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.SNP_RESP_RECV_LOCAL": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", }, "HA.SNP_RESP_RECV_LOCAL.RSPSFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPCNFLCT": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPxWB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPI": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNP_RESP_RECV_LOCAL.OTHER": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPS": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": 
"bxxxxxx1x", }, "HA.SNP_RESP_RECV_LOCAL.RSPIFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.SNP_RESP_RECV_LOCAL.RSPxFWDxWB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.STALL_NO_SBO_CREDIT": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", }, "HA.STALL_NO_SBO_CREDIT.SBO1_AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.STALL_NO_SBO_CREDIT.SBO0_BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.STALL_NO_SBO_CREDIT.SBO1_BL": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.STALL_NO_SBO_CREDIT.SBO0_AD": { "Box": "HA", "Category": "HA SBO Credit Events", "Counters": "0-3", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. 
Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 108, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TAD_REQUESTS_G0": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, }, "HA.TAD_REQUESTS_G0.REGION4": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "HA.TAD_REQUESTS_G0.REGION0": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. 
All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "HA.TAD_REQUESTS_G0.REGION7": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b10000000", }, "HA.TAD_REQUESTS_G0.REGION2": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "HA.TAD_REQUESTS_G0.REGION5": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00100000", }, "HA.TAD_REQUESTS_G0.REGION1": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "HA.TAD_REQUESTS_G0.REGION6": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b01000000", }, "HA.TAD_REQUESTS_G0.REGION3": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "HA.TAD_REQUESTS_G1": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, }, "HA.TAD_REQUESTS_G1.REGION9": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "HA.TAD_REQUESTS_G1.REGION11": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. 
There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "HA.TAD_REQUESTS_G1.REGION8": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "HA.TAD_REQUESTS_G1.REGION10": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. 
This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "HA.TRACKER_CYCLES_FULL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Full", "EvSel": 2, "ExtSel": "", }, "HA.TRACKER_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. 
HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Full", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TRACKER_CYCLES_FULL.GP": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Full", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TRACKER_CYCLES_NE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. 
HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", }, "HA.TRACKER_CYCLES_NE.REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TRACKER_CYCLES_NE.LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. 
HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TRACKER_CYCLES_NE.ALL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Cycles Not Empty", "EvSel": 3, "ExtSel": "", "Umask": "b00000011", }, "HA.TRACKER_OCCUPANCY": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. 
HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "HA.TRACKER_OCCUPANCY.INVITOE_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bx1xxxxxx", }, "HA.TRACKER_OCCUPANCY.WRITES_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxx1xxxxx", }, "HA.TRACKER_OCCUPANCY.READS_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. 
This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxx1xxx", }, "HA.TRACKER_OCCUPANCY.READS_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxx1xx", }, "HA.TRACKER_OCCUPANCY.INVITOE_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. 
HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b1xxxxxxx", }, "HA.TRACKER_OCCUPANCY.WRITES_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Occupancy Accumultor", "EvSel": 4, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxx1xxxx", }, "HA.TRACKER_PENDING_OCCUPANCY": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.", "Desc": "Data Pending Occupancy Accumultor", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, }, "HA.TRACKER_PENDING_OCCUPANCY.LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. 
(1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.", "Desc": "Data Pending Occupancy Accumultor", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000001", }, "HA.TRACKER_PENDING_OCCUPANCY.REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.", "Desc": "Data Pending Occupancy Accumultor", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 127, "SubCtr": 1, "Umask": "b00000010", }, "HA.TxR_AD_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", }, "HA.TxR_AD_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_AD_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_AD_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_AK": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Desc": "Outbound Ring Transactions on AK", "EvSel": 14, "ExtSel": "", }, "HA.TxR_AK_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", 
"Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", }, "HA.TxR_AK_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_AK_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_AK_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_BL": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", }, "HA.TxR_BL.DRS_CORE": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_BL.DRS_CACHE": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_BL.DRS_QPI": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. 
This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.TxR_BL_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", }, "HA.TxR_BL_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_BL_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_BL_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_BL_OCCUPANCY": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Occupancy", "Desc": "BL Egress Occupancy", "EvSel": 52, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "HA.TxR_STARVED": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.", "Desc": "Injection Starvation", "EvSel": 109, "ExtSel": "", }, "HA.TxR_STARVED.BL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.", "Desc": "Injection Starvation", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_STARVED.AK": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "Counts injection starvation. 
This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.", "Desc": "Injection Starvation", "EvSel": 109, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.WPQ_CYCLES_NO_REG_CREDITS": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN0": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000001", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN3": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00001000", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN2": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000100", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN1": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000010", }, # SBO: "SBO.BOUNCE_CONTROL": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Bounce Control", "EvSel": 10, "ExtSel": "", }, "SBO.CLOCKTICKS": { "Box": "SBO", "Category": "SBO UCLK Events", "Counters": "0-3", "Desc": "Uncore Clocks", "EvSel": 0, "ExtSel": "", }, "SBO.FAST_ASSERTED": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 9, "ExtSel": "", }, "SBO.RING_AD_USED": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "SBO.RING_AD_USED.DOWN_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "SBO.RING_AD_USED.CCW": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "SBO.RING_AD_USED.DOWN_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. 
On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "SBO.RING_AD_USED.UP_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "SBO.RING_AD_USED.CW": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "SBO.RING_AD_USED.ALL": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "SBO.RING_AD_USED.UP_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "SBO.RING_AK_USED": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "SBO.RING_AK_USED.UP_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "SBO.RING_AK_USED.DOWN_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "SBO.RING_AK_USED.DOWN_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "SBO.RING_AK_USED.CCW": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "SBO.RING_AK_USED.CW": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "SBO.RING_AK_USED.UP_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "SBO.RING_AK_USED.ALL": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "SBO.RING_BL_USED": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", }, "SBO.RING_BL_USED.UP_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxx1x", }, "SBO.RING_BL_USED.ALL": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001111", }, "SBO.RING_BL_USED.CW": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00000011", }, "SBO.RING_BL_USED.DOWN_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxx1xx", }, "SBO.RING_BL_USED.UP_EVEN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxxxxx1", }, "SBO.RING_BL_USED.CCW": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "b00001100", }, "SBO.RING_BL_USED.DOWN_ODD": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "MaxIncCyc": 2, "Notes": "In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction.", "Umask": "bxxxx1xxx", }, "SBO.RING_BOUNCES": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 2, }, "SBO.RING_BOUNCES.AD_CACHE": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "SBO.RING_BOUNCES.AK_CORE": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "SBO.RING_BOUNCES.IV_CORE": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "SBO.RING_BOUNCES.BL_CORE": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "SBO.RING_IV_USED": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in BDX. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. 
To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", }, "SBO.RING_IV_USED.DN": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in BDX. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00001100", }, "SBO.RING_IV_USED.UP": { "Box": "SBO", "Category": "SBO RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in BDX. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the UP direction and one (half-)packet moving in the DN direction.", "Umask": "b00000011", }, "SBO.RxR_BYPASS": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, }, "SBO.RxR_BYPASS.AK": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00010000", }, "SBO.RxR_BYPASS.AD_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000010", }, "SBO.RxR_BYPASS.AD_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000001", }, "SBO.RxR_BYPASS.BL_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000100", }, "SBO.RxR_BYPASS.IV": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00100000", }, "SBO.RxR_BYPASS.BL_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Bypass the Sbo Ingress.", "Desc": "Bypass", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00001000", }, "SBO.RxR_INSERTS": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", }, 
"SBO.RxR_INSERTS.BL_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "SBO.RxR_INSERTS.IV": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "SBO.RxR_INSERTS.BL_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxx1xx", }, "SBO.RxR_INSERTS.AD_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "SBO.RxR_INSERTS.AK": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "SBO.RxR_INSERTS.AD_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "SBO.RxR_OCCUPANCY": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. 
The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, }, "SBO.RxR_OCCUPANCY.AD_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000010", }, "SBO.RxR_OCCUPANCY.AK": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00010000", }, "SBO.RxR_OCCUPANCY.AD_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000001", }, "SBO.RxR_OCCUPANCY.BL_CRD": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000100", }, "SBO.RxR_OCCUPANCY.IV": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00100000", }, "SBO.RxR_OCCUPANCY.BL_BNC": { "Box": "SBO", "Category": "SBO INGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Ingress buffers in the Sbo. 
The Ingress is used to queue up requests received from the ring.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00001000", }, "SBO.TxR_ADS_USED": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", }, "SBO.TxR_ADS_USED.AK": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "SBO.TxR_ADS_USED.AD": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "SBO.TxR_ADS_USED.BL": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "SBO.TxR_INSERTS": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", }, "SBO.TxR_INSERTS.BL_CRD": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "SBO.TxR_INSERTS.IV": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxx1xxxxx", }, "SBO.TxR_INSERTS.BL_BNC": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "SBO.TxR_INSERTS.AD_CRD": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "SBO.TxR_INSERTS.AK": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "SBO.TxR_INSERTS.AD_BNC": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "SBO.TxR_OCCUPANCY": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, }, "SBO.TxR_OCCUPANCY.BL_CRD": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000100", }, "SBO.TxR_OCCUPANCY.IV": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. 
The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00100000", }, "SBO.TxR_OCCUPANCY.BL_BNC": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00001000", }, "SBO.TxR_OCCUPANCY.AD_BNC": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000010", }, "SBO.TxR_OCCUPANCY.AK": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00010000", }, "SBO.TxR_OCCUPANCY.AD_CRD": { "Box": "SBO", "Category": "SBO EGRESS Events", "Counters": "0-3", "Defn": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.", "Desc": "Egress Occupancy", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 12, "SubCtr": 1, "Umask": "b00000001", }, # UBOX: "UBOX.EVENT_MSG": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", }, "UBOX.EVENT_MSG.DOORBELL_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. 
Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UBOX.PHOLD_CYCLES": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles. Filter from source CoreID.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", }, "UBOX.PHOLD_CYCLES.ASSERT_TO_ACK": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles. Filter from source CoreID.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.RACU_REQUESTS": { "Box": "UBOX", "Category": "UBOX RACU Events", "Counters": "0-1", "Defn": "Number outstanding register requests within message channel tracker", "Desc": "RACU Request", "EvSel": 70, "ExtSel": "", }, # IRP: "IRP.CACHE_TOTAL_OCCUPANCY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "IRP.CACHE_TOTAL_OCCUPANCY.ANY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000001", }, "IRP.CACHE_TOTAL_OCCUPANCY.SOURCE": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. 
This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000010", }, "IRP.CLOCKTICKS": { "Box": "IRP", "Category": "IRP IO_CLKS Events", "Counters": "0-1", "Defn": "Number of clocks in the IRP.", "Desc": "Clocks in the IRP", "EvSel": 0, "ExtSel": "", }, "IRP.COHERENT_OPS": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", }, "IRP.COHERENT_OPS.CLFLUSH": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "b1xxxxxxx", }, "IRP.COHERENT_OPS.PCITOM": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IRP.COHERENT_OPS.WBMTOI": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bx1xxxxxx", }, "IRP.COHERENT_OPS.RFO": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IRP.COHERENT_OPS.DRD": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IRP.COHERENT_OPS.CRD": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number 
of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IRP.COHERENT_OPS.PCIRDCUR": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IRP.COHERENT_OPS.PCIDCAHINT": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IRP.MISC0": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", }, "IRP.MISC0.PF_ACK_HINT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx1x00000", }, "IRP.MISC0.FAST_XFER": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bxx100000", }, "IRP.MISC0.2ND_WR_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx00x1x00", }, "IRP.MISC0.FAST_REQ": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "b000000x1", }, "IRP.MISC0.2ND_ATOMIC_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx001xx00", }, "IRP.MISC0.FAST_REJ": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "b0000001x", }, "IRP.MISC0.PF_TIMEOUT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "b1xx00000", }, 
"IRP.MISC0.2ND_RD_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 20, "ExtSel": "", "Umask": "bx00xx100", }, "IRP.MISC1": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", }, "IRP.MISC1.DATA_THROTTLE": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b1xxx0000", }, "IRP.MISC1.SLOW_S": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000xxx1x", }, "IRP.MISC1.LOST_FWD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b0001xxxx", }, "IRP.MISC1.SLOW_I": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000xxxx1", }, "IRP.MISC1.SEC_RCVD_VLD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "bx1xx0000", }, "IRP.MISC1.SLOW_E": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000xx1xx", }, "IRP.MISC1.SEC_RCVD_INVLD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "bxx1x0000", }, "IRP.MISC1.SLOW_M": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 21, "ExtSel": "", "Umask": "b000x1xxx", }, "IRP.RxR_AK_INSERTS": { "Box": "IRP", "Category": "IRP AK_INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the AK Ingress. 
This queue is where the IRP receives responses from R2PCIe (the ring).", "Desc": "AK Ingress Occupancy", "EvSel": 10, "ExtSel": "", }, "IRP.RxR_BL_DRS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 4, "ExtSel": "", }, "IRP.RxR_BL_DRS_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - DRS", "EvSel": 1, "ExtSel": "", }, "IRP.RxR_BL_DRS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 7, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.RxR_BL_NCB_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 5, "ExtSel": "", }, "IRP.RxR_BL_NCB_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). 
It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - NCB", "EvSel": 2, "ExtSel": "", }, "IRP.RxR_BL_NCB_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 8, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.RxR_BL_NCS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 6, "ExtSel": "", }, "IRP.RxR_BL_NCS_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - NCS", "EvSel": 3, "ExtSel": "", }, "IRP.RxR_BL_NCS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.SNOOP_RESP": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. 
Unsure which combinations are possible.", }, "IRP.SNOOP_RESP.SNPINV": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bx1xxxxxx", }, "IRP.SNOOP_RESP.SNPCODE": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxx1xxxx", }, "IRP.SNOOP_RESP.HIT_M": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxx1xxx", }, "IRP.SNOOP_RESP.HIT_ES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. 
Unsure which combinations are possible.", "Umask": "bxxxxx1xx", }, "IRP.SNOOP_RESP.MISS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxxxxx1", }, "IRP.SNOOP_RESP.SNPDATA": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxx1xxxxx", }, "IRP.SNOOP_RESP.HIT_I": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 23, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxxxx1x", }, "IRP.TRANSACTIONS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. 
Meaningless by itself.", }, "IRP.TRANSACTIONS.ORDERINGQ": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bx1xxxxxx", }, "IRP.TRANSACTIONS.READS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxxxx1", }, "IRP.TRANSACTIONS.WR_PREF": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. 
Meaningless by itself.", "Umask": "bxxxx1xxx", }, "IRP.TRANSACTIONS.ATOMIC": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxx1xxxx", }, "IRP.TRANSACTIONS.OTHER": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxx1xxxxx", }, "IRP.TRANSACTIONS.RD_PREF": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. 
Meaningless by itself.", "Umask": "bxxxxx1xx", }, "IRP.TRANSACTIONS.WRITES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 22, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxxx1x", }, "IRP.TxR_AD_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.", "Desc": "No AD Egress Credit Stalls", "EvSel": 24, "ExtSel": "", }, "IRP.TxR_BL_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.", "Desc": "No BL Egress Credit Stalls", "EvSel": 25, "ExtSel": "", }, "IRP.TxR_DATA_INSERTS_NCB": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 14, "ExtSel": "", }, "IRP.TxR_DATA_INSERTS_NCS": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 15, "ExtSel": "", }, "IRP.TxR_REQUEST_OCCUPANCY": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Accumultes the number of outstanding outbound requests from the IRP to the 
switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.", "Desc": "Outbound Request Queue Occupancy", "EvSel": 13, "ExtSel": "", "SubCtr": 1, }, # R2PCIe: "R2PCIe.CLOCKTICKS": { "Box": "R2PCIe", "Category": "R2PCIe UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Number of uclks in domain", "EvSel": 1, "ExtSel": "", }, "R2PCIe.IIO_CREDIT": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, }, "R2PCIe.IIO_CREDIT.ISOCH_QPI0": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "R2PCIe.IIO_CREDIT.PRQ_QPI0": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "R2PCIe.IIO_CREDIT.ISOCH_QPI1": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxx1xxx", }, "R2PCIe.IIO_CREDIT.PRQ_QPI1": { "Box": "R2PCIe", "Category": "R2PCIe IIO Credit Events", "Counters": "0-1", "EvSel": 45, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AD_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_AD_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_AD_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AD_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_AD_USED.ALL": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "R2PCIe.RING_AD_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AD_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RING_AD_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_AK_BOUNCES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", }, "R2PCIe.RING_AK_BOUNCES.DN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AK_BOUNCES.UP": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AK_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_AK_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RING_AK_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AK_USED.ALL": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "R2PCIe.RING_AK_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_AK_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_AK_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AK_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_BL_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_BL_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_BL_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_BL_USED.ALL": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001111", }, "R2PCIe.RING_BL_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_BL_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RING_BL_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RING_BL_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_IV_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", }, "R2PCIe.RING_IV_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00000011", }, "R2PCIe.RING_IV_USED.ANY": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. 
In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00001111", }, "R2PCIe.RING_IV_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Notes": "IV messages are split into two parts. In any cycle, a ring stop can see up to one (half-)packet moving in the CW direction and one (half-)packet moving in the CCW direction.", "Umask": "b00001100", }, "R2PCIe.RxR_CYCLES_NE": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", }, "R2PCIe.RxR_CYCLES_NE.NCS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.RxR_CYCLES_NE.NCB": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.RxR_INSERTS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", }, "R2PCIe.RxR_INSERTS.NCS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.RxR_INSERTS.NCB": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. 
This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.RxR_OCCUPANCY": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "R2PCIe.RxR_OCCUPANCY.DRS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, "Umask": "b00001000", }, "R2PCIe.SBO0_CREDITS_ACQUIRED": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, }, "R2PCIe.SBO0_CREDITS_ACQUIRED.AD": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "R2PCIe.SBO0_CREDITS_ACQUIRED.BL": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of Sbo 0 credits acquired in a given cycle, per ring.", "Desc": "SBo0 Credits Acquired", "EvSel": 40, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "R2PCIe.STALL_NO_SBO_CREDIT": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO0_BL": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO1_AD": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. 
Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxx1x", }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO1_BL": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxx1xxx", }, "R2PCIe.STALL_NO_SBO_CREDIT.SBO0_AD": { "Box": "R2PCIe", "Category": "R2PCIe SBO Credit Events", "Counters": "0-1", "Defn": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.", "Desc": "Stall on No Sbo Credits", "EvSel": 44, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_CYCLES_FULL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", }, "R2PCIe.TxR_CYCLES_FULL.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_CYCLES_FULL.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_CYCLES_FULL.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_CYCLES_NE": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. 
This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", }, "R2PCIe.TxR_CYCLES_NE.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_CYCLES_NE.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_CYCLES_NE.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_NACK_CW": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", }, "R2PCIe.TxR_NACK_CW.DN_AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_NACK_CW.DN_BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_NACK_CW.UP_BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.TxR_NACK_CW.DN_AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_NACK_CW.UP_AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.TxR_NACK_CW.UP_AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxx1xxx", }, # iMC: "iMC.ACT_COUNT": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. 
One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", }, "iMC.ACT_COUNT.RD": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.ACT_COUNT.BYP": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.ACT_COUNT.WR": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. 
One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.BYP_CMDS": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", }, "iMC.BYP_CMDS.ACT": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.BYP_CMDS.PRE": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.BYP_CMDS.CAS": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.CAS_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", }, "iMC.CAS_COUNT.WR_WMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.CAS_COUNT.ALL": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001111", }, "iMC.CAS_COUNT.WR_RMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.CAS_COUNT.RD_RMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.CAS_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS 
Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00000011", }, "iMC.CAS_COUNT.RD_REG": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.CAS_COUNT.RD_UNDERFILL": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.CAS_COUNT.RD_WMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.CAS_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001100", }, "iMC.DCLOCKTICKS": { "Box": "iMC", "Category": "iMC DCLK Events", "Counters": "0-3", "Desc": "DRAM Clockticks", "EvSel": 0, "ExtSel": "", }, "iMC.DRAM_PRE_ALL": { "Box": "iMC", "Category": "iMC DRAM_PRE_ALL Events", "Counters": "0-3", "Defn": "Counts the number of times that the precharge all command was sent.", "Desc": "DRAM Precharge All Commands", "EvSel": 6, "ExtSel": "", }, "iMC.DRAM_REFRESH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", }, "iMC.DRAM_REFRESH.HIGH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.DRAM_REFRESH.PANIC": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes 
issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.ECC_CORRECTABLE_ERRORS": { "Box": "iMC", "Category": "iMC ECC Events", "Counters": "0-3", "Defn": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit erros in lockstep mode.", "Desc": "ECC Correctable Errors", "EvSel": 9, "ExtSel": "", }, "iMC.MAJOR_MODES": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", }, "iMC.MAJOR_MODES.READ": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.MAJOR_MODES.PARTIAL": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.MAJOR_MODES.WRITE": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. 
Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.MAJOR_MODES.ISOCH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.POWER_CHANNEL_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.", "Desc": "Channel DLLOFF Cycles", "EvSel": 132, "ExtSel": "", "Notes": "IBT = Input Buffer Termination = Off", }, "iMC.POWER_CHANNEL_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.", "Desc": "Channel PPD Cycles", "EvSel": 133, "ExtSel": "", "MaxIncCyc": 4, "Notes": "IBT = Input Buffer Termination = On. ALL Ranks must be populated in order to measure", }, "iMC.POWER_CKE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. 
Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, }, "iMC.POWER_CKE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000100", }, "iMC.POWER_CKE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000010", }, "iMC.POWER_CKE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00001000", }, "iMC.POWER_CKE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b10000000", }, "iMC.POWER_CKE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b01000000", }, "iMC.POWER_CKE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00100000", }, "iMC.POWER_CKE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00010000", }, "iMC.POWER_CKE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000001", }, "iMC.POWER_CRITICAL_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.", "Desc": "Critical Throttle Cycles", "EvSel": 134, "ExtSel": "", }, "iMC.POWER_PCU_THROTTLING": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "EvSel": 66, "ExtSel": "", }, "iMC.POWER_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.", "Desc": "Clock-Enabled Self-Refresh", "EvSel": 67, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. 
If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.POWER_THROTTLE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.POWER_THROTTLE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. 
This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. 
It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PREEMPTION": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", }, "iMC.PREEMPTION.RD_PREEMPT_RD": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PREEMPTION.RD_PREEMPT_WR": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. 
This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PRE_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", }, "iMC.PRE_COUNT.PAGE_MISS": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PRE_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.PRE_COUNT.BYP": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.PRE_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.PRE_COUNT.PAGE_CLOSE": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_PRIO": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", }, "iMC.RD_CAS_PRIO.MED": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_PRIO.LOW": { "Box": 
"iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_PRIO.PANIC": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_PRIO.HIGH": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_RANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", }, "iMC.RD_CAS_RANK0.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK0.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK0.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK0.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK0.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK0.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK0.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK0.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001010", }, 
"iMC.RD_CAS_RANK0.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK0.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK0.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK0.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK0.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK0.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK0.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK0.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK0.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK0.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK0.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001000", }, 
"iMC.RD_CAS_RANK0.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK0.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", }, "iMC.RD_CAS_RANK1.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK1.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK1.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK1.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK1.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK1.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK1.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK1.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK1.ALLBANKS": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK1.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK1.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK1.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK1.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK1.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK1.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK1.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK1.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK1.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK1.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK1.BANKG1": { "Box": "iMC", "Category": "iMC CAS 
Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK1.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", }, "iMC.RD_CAS_RANK2.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", }, "iMC.RD_CAS_RANK4.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK4.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK4.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK4.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK4.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK4.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK4.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, 
"ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK4.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK4.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK4.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK4.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK4.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK4.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK4.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK4.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK4.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK4.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK4.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": 
"b00001011", }, "iMC.RD_CAS_RANK4.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK4.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK4.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", }, "iMC.RD_CAS_RANK5.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK5.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK5.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK5.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK5.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK5.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK5.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK5.BANK13": { 
"Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK5.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK5.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK5.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK5.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK5.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK5.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK5.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK5.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK5.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK5.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK5.BANKG0": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK5.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK5.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", }, "iMC.RD_CAS_RANK6.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK6.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK6.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK6.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK6.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK6.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK6.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK6.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", 
"Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK6.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK6.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK6.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK6.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK6.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK6.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK6.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK6.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK6.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK6.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK6.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access 
to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK6.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK6.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", }, "iMC.RD_CAS_RANK7.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK7.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK7.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK7.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK7.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK7.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK7.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK7.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": 
"b00001000", }, "iMC.RD_CAS_RANK7.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK7.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK7.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK7.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK7.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK7.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK7.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK7.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK7.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK7.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK7.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001100", }, 
"iMC.RD_CAS_RANK7.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK7.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000110", }, "iMC.RPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.", "Desc": "Read Pending Queue Not Empty", "EvSel": 17, "ExtSel": "", }, "iMC.RPQ_INSERTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. 
This includes both ISOCH and non-ISOCH requests.", "Desc": "Read Pending Queue Allocations", "EvSel": 16, "ExtSel": "", }, "iMC.VMSE_MXB_WR_OCCUPANCY": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE MXB write buffer occupancy", "EvSel": 145, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, }, "iMC.VMSE_WR_PUSH": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", }, "iMC.VMSE_WR_PUSH.RMM": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.VMSE_WR_PUSH.WMM": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WMM_TO_RMM": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", }, "iMC.WMM_TO_RMM.LOW_THRESH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WMM_TO_RMM.VMSE_RETRY": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WMM_TO_RMM.STARVE": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WPQ_CYCLES_FULL": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. 
This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.", "Desc": "Write Pending Queue Full Cycles", "EvSel": 34, "ExtSel": "", }, "iMC.WPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.", "Desc": "Write Pending Queue Not Empty", "EvSel": 33, "ExtSel": "", }, "iMC.WPQ_READ_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. 
Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 35, "ExtSel": "", }, "iMC.WPQ_WRITE_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 36, "ExtSel": "", }, "iMC.WRONG_MM": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Not getting the requested Major Mode", "EvSel": 193, "ExtSel": "", }, "iMC.WR_CAS_RANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", }, "iMC.WR_CAS_RANK0.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK0.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK0.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK0.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK0.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", 
"Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK0.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK0.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK0.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK0.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK0.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK0.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK0.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK0.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK0.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK0.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK0.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", 
"Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK0.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK0.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK0.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK0.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK0.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", }, "iMC.WR_CAS_RANK1.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK1.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK1.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK1.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK1.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, 
"ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK1.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK1.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK1.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK1.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK1.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK1.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK1.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK1.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK1.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK1.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK1.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", 
"Umask": "b00000100", }, "iMC.WR_CAS_RANK1.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK1.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK1.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK1.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK1.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", }, "iMC.WR_CAS_RANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", }, "iMC.WR_CAS_RANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", }, "iMC.WR_CAS_RANK4.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK4.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK4.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK4.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": 
"0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK4.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK4.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK4.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK4.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK4.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK4.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK4.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK4.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK4.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK4.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK4.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": 
"WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK4.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK4.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK4.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK4.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK4.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK4.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", }, "iMC.WR_CAS_RANK5.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK5.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK5.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK5.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", 
"Umask": "b00000111", }, "iMC.WR_CAS_RANK5.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK5.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK5.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK5.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK5.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK5.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK5.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK5.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK5.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK5.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK5.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": 
"b00001110", }, "iMC.WR_CAS_RANK5.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK5.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK5.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK5.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK5.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK5.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", }, "iMC.WR_CAS_RANK6.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK6.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK6.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK6.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK6.BANK0": { "Box": 
"iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK6.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK6.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK6.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK6.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK6.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK6.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK6.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK6.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK6.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK6.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK6.BANK7": { "Box": "iMC", "Category": "iMC 
CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK6.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK6.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK6.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK6.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK6.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", }, "iMC.WR_CAS_RANK7.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK7.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK7.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK7.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK7.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS 
Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK7.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK7.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK7.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK7.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK7.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK7.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK7.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK7.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK7.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK7.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK7.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", 
"EvSel": 191, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK7.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK7.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK7.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK7.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK7.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000101", }, # PCU: "PCU.CLOCKTICKS": { "Box": "PCU", "Category": "PCU PCLK Events", "Counters": "0-3", "Defn": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.", "Desc": "pclk Cycles", "EvSel": 0, "ExtSel": "", }, "PCU.CORE0_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 96, "ExtSel": "", "Notes": "This only tracks the hardware portion in the RCFSM (CFCFSM). This portion is just doing the core C state transition. 
It does not include any necessary frequency/voltage transitions.", }, "PCU.CORE10_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 106, "ExtSel": "", }, "PCU.CORE11_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 107, "ExtSel": "", }, "PCU.CORE12_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 108, "ExtSel": "", }, "PCU.CORE13_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 109, "ExtSel": "", }, "PCU.CORE14_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 110, "ExtSel": "", }, "PCU.CORE15_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 111, "ExtSel": "", }, "PCU.CORE16_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 112, "ExtSel": "", }, "PCU.CORE17_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 113, "ExtSel": "", }, "PCU.CORE1_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 97, "ExtSel": "", }, "PCU.CORE2_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 98, "ExtSel": "", }, "PCU.CORE3_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 99, "ExtSel": "", }, "PCU.CORE4_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 100, "ExtSel": "", }, "PCU.CORE5_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 101, "ExtSel": "", }, "PCU.CORE6_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 102, "ExtSel": "", }, "PCU.CORE7_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 103, "ExtSel": "", }, "PCU.CORE8_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 104, "ExtSel": "", "Notes": "This only tracks the hardware portion in the RCFSM (CFCFSM). This portion is just doing the core C state transition. It does not include any necessary frequency/voltage transitions.", }, "PCU.CORE9_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 105, "ExtSel": "", }, "PCU.DEMOTIONS_CORE0": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 48, "ExtSel": "", }, "PCU.DEMOTIONS_CORE1": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 49, "ExtSel": "", }, "PCU.DEMOTIONS_CORE10": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 58, "ExtSel": "", }, "PCU.DEMOTIONS_CORE11": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 59, "ExtSel": "", }, "PCU.DEMOTIONS_CORE12": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 60, "ExtSel": "", }, "PCU.DEMOTIONS_CORE13": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 61, "ExtSel": "", }, "PCU.DEMOTIONS_CORE14": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 62, "ExtSel": "", }, "PCU.DEMOTIONS_CORE15": { "Box": "PCU", "Category": "PCU 
CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 63, "ExtSel": "", }, "PCU.DEMOTIONS_CORE16": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 64, "ExtSel": "", }, "PCU.DEMOTIONS_CORE17": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 65, "ExtSel": "", }, "PCU.DEMOTIONS_CORE2": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 50, "ExtSel": "", }, "PCU.DEMOTIONS_CORE3": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 51, "ExtSel": "", }, "PCU.DEMOTIONS_CORE4": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 52, "ExtSel": "", }, "PCU.DEMOTIONS_CORE5": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 53, "ExtSel": "", }, "PCU.DEMOTIONS_CORE6": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 
54, "ExtSel": "", }, "PCU.DEMOTIONS_CORE7": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 55, "ExtSel": "", }, "PCU.DEMOTIONS_CORE8": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 56, "ExtSel": "", }, "PCU.DEMOTIONS_CORE9": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 57, "ExtSel": "", }, "PCU.FREQ_MAX_LIMIT_THERMAL_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. 
This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.", "Desc": "Thermal Strongest Upper Limit Cycles", "EvSel": 4, "ExtSel": "", }, "PCU.FREQ_MAX_OS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the OS is the upper limit on frequency.", "Desc": "OS Strongest Upper Limit Cycles", "EvSel": 6, "ExtSel": "", "Notes": "Essentially, this event says the OS is getting the frequency it requested.", }, "PCU.FREQ_MAX_POWER_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when power is the upper limit on frequency.", "Desc": "Power Strongest Upper Limit Cycles", "EvSel": 5, "ExtSel": "", }, "PCU.FREQ_MIN_IO_P_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MIN_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.", "Desc": "IO P Limit Strongest Lower Limit Cycles", "EvSel": 115, "ExtSel": "", }, "PCU.FREQ_TRANS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. 
One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.", "Desc": "Cycles spent changing Frequency", "EvSel": 116, "ExtSel": "", }, "PCU.MEMORY_PHASE_SHEDDING_CYCLES": { "Box": "PCU", "Category": "PCU MEMORY_PHASE_SHEDDING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.", "Desc": "Memory Phase Shedding Cycles", "EvSel": 47, "ExtSel": "", "Notes": "Package C1", }, "PCU.PKG_RESIDENCY_C0_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C0", "EvSel": 42, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C1E_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C1E. This event can be used in conjunction with edge detect to count C1E entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C1E", "EvSel": 78, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C2E_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). 
Residency events do not include transition times.", "Desc": "Package C State Residency - C2E", "EvSel": 43, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C3_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C3", "EvSel": 44, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C6_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C6", "EvSel": 45, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C7_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C7. This event can be used in conjunction with edge detect to count C7 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C7 State Residency", "EvSel": 46, "ExtSel": "", }, "PCU.POWER_STATE_OCCUPANCY": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, }, "PCU.POWER_STATE_OCCUPANCY.CORES_C3": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b10000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C6": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b11000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C0": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b01000000", }, "PCU.PROCHOT_EXTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.", "Desc": "External Prochot", "EvSel": 10, "ExtSel": "", }, "PCU.PROCHOT_INTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in Interal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.", "Desc": "Internal Prochot", "EvSel": 9, "ExtSel": "", }, "PCU.TOTAL_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions across all cores.", "Desc": "Total Core C State Transition Cycles", "EvSel": 114, "ExtSel": "", }, "PCU.UFS_TRANSITIONS_RING_GV": { "Box": "PCU", "Category": "PCU UFS Events", "Counters": "0-3", "Defn": "Ring GV with same final and initial frequency", "EvSel": 121, "ExtSel": "", }, "PCU.VR_HOT_CYCLES": { "Box": "PCU", "Category": "PCU VR_HOT Events", "Counters": "0-3", "Desc": "VR Hot", "EvSel": 66, "ExtSel": "", }, } derived = { # PCU: "PCU.PCT_CYC_FREQ_OS_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by the OS", "Desc": "Percent Frequency OS Limited", "Equation": "FREQ_MAX_OS_CYCLES / CLOCKTICKS", }, "PCU.PCT_CYC_FREQ_POWER_LTD": { "Box": "PCU", "Category": "PCU 
FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by power", "Desc": "Percent Frequency Power Limited", "Equation": "FREQ_MAX_POWER_CYCLES / CLOCKTICKS", }, "PCU.PCT_CYC_FREQ_THERMAL_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by thermal issues", "Desc": "Percent Frequency Thermal Limited", "Equation": "FREQ_MAX_LIMIT_THERMAL_CYCLES / CLOCKTICKS", }, # R2PCIe: "R2PCIe.CYC_USED_DN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used Down and Even", "Equation": "RING_BL_USED.CCW / SAMPLE_INTERVAL", }, "R2PCIe.CYC_USED_UP": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.CW / SAMPLE_INTERVAL", }, "R2PCIe.RING_THRU_DN_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", "Desc": "Ring Throughput Down and Even", "Equation": "RING_BL_USED.CCW* 32", }, "R2PCIe.RING_THRU_UP_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": "Ring Throughput Up and Even", "Equation": "RING_BL_USED.CW * 32", }, # iMC: "iMC.MEM_BW_READS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by reads. Expressed in bytes.", "Desc": "Read Memory Bandwidth", "Equation": "(CAS_COUNT.RD * 64)", }, "iMC.MEM_BW_TOTAL": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Total memory bandwidth. 
Expressed in bytes.", "Desc": "Total Memory Bandwidth", "Equation": "MEM_BW_READS + MEM_BW_WRITES", }, "iMC.MEM_BW_WRITES": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by writes Expressed in bytes.", "Desc": "Write Memory Bandwidth", "Equation": "(CAS_COUNT.WR * 64)", }, "iMC.PCT_CYCLES_CRITICAL_THROTTLE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in critical thermal throttling", "Desc": "Percent Cycles Critical Throttle", "Equation": "POWER_CRITICAL_THROTTLE_CYCLES / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in CKE slow (DLOFF) mode", "Desc": "Percent Cycles DLOFF", "Equation": "POWER_CHANNEL_DLLOFF / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_CKE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in CKE ON mode.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_CKE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_THR": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in thermal throttling.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_THROTTLE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in PPD mode", "Desc": "Percent Cycles PPD", "Equation": "POWER_CHANNEL_PPD / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles Memory is in self refresh power mode", "Desc": "Percent Cycles Self Refresh", "Equation": "POWER_SELF_REFRESH / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_REQUESTS_PAGE_EMPTY": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that 
resulted in Page Empty", "Desc": "Percent Requests Page Empty", "Equation": "(ACT_COUNT - PRE_COUNT.PAGE_MISS)/ (CAS_COUNT.RD + CAS_COUNT.WR)", }, "iMC.PCT_REQUESTS_PAGE_HIT": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Hits", "Desc": "Percent Requests Page Hit", "Equation": "1 - (PCT_REQUESTS_PAGE_EMPTY + PCT_REQUESTS_PAGE_MISS)", }, "iMC.PCT_REQUESTS_PAGE_MISS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Misses", "Desc": "Percent Requests Page Miss", "Equation": "PRE_COUNT.PAGE_MISS / (CAS_COUNT.RD + CAS_COUNT.WR)", }, # SBO: "SBO.CYC_USED_DN": { "Box": "SBO", "Category": "SBO RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used Down and Even", "Equation": "RING_BL_USED.CCW / SAMPLE_INTERVAL", }, "SBO.CYC_USED_UP": { "Box": "SBO", "Category": "SBO RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.CW / SAMPLE_INTERVAL", }, "SBO.RING_THRU_DN_BYTES": { "Box": "SBO", "Category": "SBO RING Events", "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", "Desc": "Ring Throughput Down and Even", "Equation": "RING_BL_USED.CCW* 32", }, "SBO.RING_THRU_UP_BYTES": { "Box": "SBO", "Category": "SBO RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": "Ring Throughput Up and Even", "Equation": "RING_BL_USED.CW * 32", }, # CBO: "CBO.AVG_INGRESS_DEPTH": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Depth of the Ingress Queue through the sample interval", "Desc": "Average Ingress Depth", "Equation": "RxR_OCCUPANCY.IRQ / SAMPLE_INTERVAL", }, "CBO.AVG_INGRESS_LATENCY": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks", "Desc": "Average Ingress Latency", "Equation": 
"RxR_OCCUPANCY.IRQ / RxR_INSERTS.IRQ", }, "CBO.AVG_INGRESS_LATENCY_WHEN_NE": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks when Ingress Queue has at least one entry", "Desc": "Average Latency in Non-Empty Ingress", "Equation": "RxR_OCCUPANCY.IRQ / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}", }, "CBO.AVG_TOR_DRDS_MISS_WHEN_NE": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Number of Data Read Entries that Miss the LLC when the TOR is not empty.", "Desc": "Average Data Read Misses in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRDS_WHEN_NE": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Number of Data Read Entries when the TOR is not empty.", "Desc": "Average Data Reads in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.OPCODE / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_HIT_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that hit the LLC", "Desc": "Data Read Hit Latency through TOR", "Equation": "((TOR_OCCUPANCY.OPCODE - TOR_OCCUPANCY.MISS_OPCODE) / (TOR_INSERTS.OPCODE - TOR_INSERTS.MISS_OPCODE)) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Read Entries making their way through the TOR", "Desc": "Data Read Latency through TOR", "Equation": "(TOR_OCCUPANCY.OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, #"CBO.AVG_TOR_DRD_LOC_MISS_LATENCY": { # "Box": "CBO", # "Category": "CBO TOR Events", # "Defn": "Average Latency of Data Reads through the TOR that miss the LLC and were satsified 
by Local Memory", # "Desc": "Data Read Local Miss Latency through TOR", # "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.{opc,nid}={0x182,my_node}", # "Filter": "CBoFilter1[28:20], CBoFilter1[15:0]", #}, "CBO.AVG_TOR_DRD_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC", "Desc": "Data Read Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, #"CBO.AVG_TOR_DRD_REM_MISS_LATENCY": { # "Box": "CBO", # "Category": "CBO TOR Events", # "Defn": "Average Latency of Data Reads through the TOR that miss the LLC and were satsified by a Remote cache or Remote Memory", # "Desc": "Data Read Remote Miss Latency through TOR", # "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER.{opc,nid}={0x182,other_nodes}", # "Filter": "CBoFilter1[28:20], CBoFilter1[15:0]", #}, "CBO.CYC_INGRESS_BLOCKED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Cycles the Ingress Request Queue arbiter was Blocked", "Desc": "Cycles Ingress Blocked", "Equation": "RxR_EXT_STARVED.IRQ / SAMPLE_INTERVAL", }, "CBO.CYC_USED_DN": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used Down and Even", "Equation": "RING_BL_USED.CCW / SAMPLE_INTERVAL", }, "CBO.CYC_USED_UP": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.CW / SAMPLE_INTERVAL", }, "CBO.FAST_STR_LLC_MISS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of ItoM (fast string) operations that miss the LLC", "Desc": "Fast String misses", "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8 + TOR_INSERTS.MISS_OPCODE 
with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E, Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8}", "Filter": "CBoFilter1[28:20]", }, "CBO.FAST_STR_LLC_REQ": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of ItoM (fast string) operations that reference the LLC", "Desc": "Fast String operations", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8 + TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E, Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8}", "Filter": "CBoFilter1[28:20]", }, "CBO.INGRESS_REJ_V_INS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Ratio of Ingress Request Entries that were rejected vs. inserted", "Desc": "Ingress Rejects vs. Inserts", "Equation": "RxR_INSERTS.IRQ_REJ / RxR_INSERTS.IRQ", }, "CBO.IO_READ_BW": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "IO Read Bandwidth in MB - Disk or Network Reads", "Desc": "IO Read Bandwidth", "Equation": "(TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E, Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8} + TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x1E6) * 64 / 1000000", "Filter": "CBoFilter0[5:0], CBoFilter1[28:20]", }, "CBO.IO_WRITE_BW": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "IO Write Bandwidth in MB - Disk or Network Writes", "Desc": "IO Write Bandwidth", "Equation": "(TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x19E + TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x1E4) * 64 / 1000000", "Filter": "CBoFilter1[28:20]", }, "CBO.LLC_DRD_MISS_PCT": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "LLC Data Read miss ratio", "Desc": "LLC DRD Miss Ratio", "Equation": "LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER0.state=0x1 / LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER0.state=0x3F", "Filter": "CBoFilter0[23:17]", }, "CBO.LLC_RFO_MISS_PCT": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC RFO Miss Ratio", "Desc": "LLC RFO Miss Ratio", "Equation": "(TOR_INSERTS.MISS_OPCODE / TOR_INSERTS.OPCODE) 
with:Cn_MSR_PMON_BOX_FILTER1.opc=0x180 - (TOR_INSERTS.MISS_OPCODE / TOR_INSERTS.OPCODE) with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E,Cn_MSR_PMON_BOX_FILTER1.opc=0x180}", "Filter": "CBoFilter1[28:20]", }, "CBO.MEM_WB_BYTES": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "Data written back to memory in Number of Bytes", "Desc": "Memory Writebacks", "Equation": "LLC_VICTIMS.M_STATE * 64", }, "CBO.MMIO_PARTIAL_READS_CPU": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of Partial MMIO Reads initiated by a Core", "Desc": "MMIO Partial Reads - CPU", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.nc=1, Cn_MSR_PMON_BOX_FILTER1.opc=0x187}", "Filter": "CBoFilter1[28:20], CBoFilter1[30]", }, "CBO.MMIO_WRITES_CPU": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of MMIO Writes initiated by a Core", "Desc": "MMIO Writes - CPU", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.nc=1, Cn_MSR_PMON_BOX_FILTER1.opc=0x18F}", "Filter": "CBoFilter1[28:20], CBoFilter1[30]", }, "CBO.PARTIAL_PCI_WRITES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of partial PCI writes", "Desc": "Partial PCI Writes", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E,Cn_MSR_PMON_BOX_FILTER1.opc=0x180}", "Filter": "CBoFilter0[5:0], CBoFilter1[28:20]", }, "CBO.PCI_READS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of PCI reads (full and partial)", "Desc": "PCI Reads", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x19E", "Filter": "CBoFilter1[28:20]", }, "CBO.PCI_WRITES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of PCI writes", "Desc": "PCI Writes", "Equation": "TOR_INSERTS.OPCODE with:{Cn_MSR_PMON_BOX_FILTER0.tid=0x3E,Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8}", "Filter": "CBoFilter0[5:0], CBoFilter1[28:20]", }, "CBO.RING_THRU_DN_BYTES": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", 
"Desc": "Ring Throughput Down and Even", "Equation": "RING_BL_USED.CCW* 32", }, "CBO.RING_THRU_UP_BYTES": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": "Ring Throughput Up and Even", "Equation": "RING_BL_USED.CW * 32", }, "CBO.STREAMED_FULL_STORES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of Streamed Store (of Full Cache Line) Transactions", "Desc": "Streaming Stores (Full Line)", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x18C", "Filter": "CBoFilter1[28:20]", }, "CBO.STREAMED_PART_STORES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of Streamed Store (of Partial Cache Line) Transactions", "Desc": "Streaming Stores (Partial Line)", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x18D", "Filter": "CBoFilter1[28:20]", }, "CBO.UC_READS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Uncachable Read Transactions", "Desc": "Uncacheable Reads", "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x187", "Filter": "CBoFilter1[28:20]", }, # R3QPI: # QPI_LL: "QPI_LL.DATA_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "Data received from QPI in bytes ( = DRS + NCB Data messages received from QPI)", "Desc": "Data From QPI", "Equation": "DRS_DATA_MSGS_FROM_QPI + NCB_DATA_MSGS_FROM_QPI", }, "QPI_LL.DATA_FROM_QPI_TO_HA_OR_IIO": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Defn": "Data received from QPI forwarded to HA or IIO. Expressed in Bytes", "Desc": "Data From QPI To HA or IIO", "Equation": "DATA_FROM_QPI - DATA_FROM_QPI_TO_LLC", }, "QPI_LL.DATA_FROM_QPI_TO_LLC": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Defn": "Data received from QPI forwarded to LLC. 
Expressed in Bytes", "Desc": "Data From QPI To LLC", "Equation": "DIRECT2CORE.SUCCESS_RBT_HIT * 64", }, "QPI_LL.DATA_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "Data packets received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "Data From QPI To Node x", "Equation": "DRS_DataC_FROM_QPI_TO_NODEx + DRS_WRITE_FROM_QPI_TO_NODEx + NCB_DATA_FROM_QPI_TO_NODEx", }, "QPI_LL.DRS_DATA_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Defn": "DRS Data Messges From QPI in bytes", "Desc": "DRS Data Messges From QPI", "Equation": "(RxL_FLITS_G1.DRS_DATA * 8)", }, "QPI_LL.DRS_DataC_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS DataC packets received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "DRS DataC From QPI To Node x", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1c00,Q_Py_PCI_PMON_PKT_z_MATCH0.dnid=x, Q_Py_PCI_PMON_PKT_z_MASK0[17:0]=0x3FF80}) * 64", "Filter": "QPIRxMask0[17:0],QPIRxMatch0[17:0];QPITxMask0[17:0],QPITxMatch0[17:0]", }, "QPI_LL.DRS_DataC_M_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS DataC_F packets received from QPI. 
Expressed in bytes", "Desc": "DRS DataC_Fs From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) * 64", "Filter": "QPIMask0[17:0],QPIMatch0[17:0],QPIMask1[19:16],QPIMatch1[19:16]", }, "QPI_LL.DRS_FULL_CACHELINE_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Full Cacheline Data Messges From QPI in bytes", "Desc": "DRS Full Cacheline Data Messges From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00,Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1F00}) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0];QPITxMask0[12:0],QPITxMatch0[12:0]", }, "QPI_LL.DRS_F_OR_E_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS response in F or E states received from QPI in bytes. To calculate the total data response for each cache line state, it's necessary to add the contribution from three flavors {DataC, DataC_FrcAckCnflt, DataC_Cmp} of data response packets for each cache line state.", "Desc": "DRS Data in F or E From QPI", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C40, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C40, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C20, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, 
Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C20, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF })) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0],QPIRxMask1[19:16],QPIRxMatch1[19:16];QPITxMask0[12:0],QPITxMatch0[12:0],QPITxMask1[19:16],QPITxMatch1[19:16]", }, "QPI_LL.DRS_M_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS response in M state received from QPI in bytes", "Desc": "DRS Data in M From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x8, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0],QPIRxMask1[19:16],QPIRxMatch1[19:16];QPITxMask0[12:0],QPITxMatch0[12:0],QPITxMask1[19:16],QPITxMatch1[19:16]", }, "QPI_LL.DRS_PTL_CACHELINE_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Partial Cacheline Data Messges From QPI in bytes", "Desc": "DRS Partial Cacheline Data Messges From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1D00, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1F00}) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0];QPITxMask0[12:0],QPITxMatch0[12:0]", }, "QPI_LL.DRS_WB_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback packets received from QPI in bytes. This is the sum of Wb{I,S,E} DRS packets", "Desc": "DRS Writeback From QPI", "Equation": "DRS_WbI_FROM_QPI + DRS_WbS_FROM_QPI + DRS_WbE_FROM_QPI", }, "QPI_LL.DRS_WRITE_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Data packets (Any - DataC) received from QPI sent to Node ID 'x'. 
Expressed in bytes", "Desc": "DRS Data From QPI To Node x", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1c00,Q_Py_PCI_PMON_PKT_z_MATCH0.dnid=x, Q_Py_PCI_PMON_PKT_z_MASK0[17:0]=0x3FE00}) - (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00,Q_Py_PCI_PMON_PKT_z_MATCH0.dnid=x, Q_Py_PCI_PMON_PKT_z_MASK0[17:0]=0x3FF80})) * 64", "Filter": "QPIRxMask0[17:0],QPIRxMatch0[17:0];QPITxMask0[17:0],QPITxMatch0[17:0]", }, "QPI_LL.DRS_WbE_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to E state' packets received from QPI in bytes", "Desc": "DRS WbE From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1CC0, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0];QPITxMask0[12:0],QPITxMatch0[12:0]", }, "QPI_LL.DRS_WbI_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to I state' packets received from QPI in bytes", "Desc": "DRS WbI From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C80, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0];QPITxMask0[12:0],QPITxMatch0[12:0]", }, "QPI_LL.DRS_WbS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to S state' packets received from QPI in bytes", "Desc": "DRS WbSFrom QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1CA0, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIRxMask0[12:0],QPIRxMatch0[12:0];QPITxMask0[12:0],QPITxMatch0[12:0]", }, "QPI_LL.NCB_DATA_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "NCB Data packets (Any - Interrupts) received from QPI sent to Node ID 'x'. 
Expressed in bytes", "Desc": "NCB Data From QPI To Node x", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1800,Q_Py_PCI_PMON_PKT_z_MATCH0.dnid=x, Q_Py_PCI_PMON_PKT_z_MASK0[17:0]=0x3FE00}) - (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1900,Q_Py_PCI_PMON_PKT_z_MATCH0.dnid=x, Q_Py_PCI_PMON_PKT_z_MASK0[17:0]=0x3FF80})) * 64", "Filter": "QPIRxMask0[17:0],QPIRxMatch0[17:0];QPITxMask0[17:0],QPITxMatch0[17:0]", }, "QPI_LL.NCB_DATA_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Defn": "NCB Data Messages From QPI in bytes", "Desc": "NCB Data Messages From QPI", "Equation": "(RxL_FLITS_G2.NCB_DATA * 8)", }, "QPI_LL.PCT_LINK_FULL_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Defn": "Percent of Cycles the QPI link is at Full Power", "Desc": "Percent Link Full Power Cycles", "Equation": "RxL0_POWER_CYCLES / CLOCKTICKS", }, "QPI_LL.PCT_LINK_HALF_DISABLED_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Defn": "Percent of Cycles the QPI link in power mode where half of the lanes are disabled.", "Desc": "Percent Link Half Disabled Cycles", "Equation": "RxL0P_POWER_CYCLES / CLOCKTICKS", }, "QPI_LL.PCT_LINK_SHUTDOWN_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER Events", "Defn": "Percent of Cycles the QPI link is Shutdown", "Desc": "Percent Link Shutdown Cycles", "Equation": "L1_POWER_CYCLES / CLOCKTICKS", }, "QPI_LL.QPI_DATA_BW": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Defn": "QPI data transmit bandwidth in Bytes", "Desc": "QPI Data Bandwidth", "Equation": "TxL_FLITS_G0.DATA * 8", }, "QPI_LL.QPI_LINK_BW": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Defn": "QPI total transmit bandwidth in Bytes (includes control)", "Desc": "QPI Link Bandwidth", "Equation": "(TxL_FLITS_G0.DATA + TxL_FLITS_G0.NON_DATA) * 8", }, "QPI_LL.QPI_LINK_UTIL": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Defn": "QPI total transmit bandwidth in Bytes (includes 
control)", "Desc": "QPI Link Bandwidth", "Equation": "(TxL_FLITS_G0.DATA + TxL_FLITS_G0.NON_DATA) / (2 * CLOCKTICKS)", }, "QPI_LL.QPI_SPEED": { "Box": "QPI_LL", "Category": "QPI_LL CFCLK Events", "Defn": "QPI Speed - In GT/s (GigaTransfers / Second) - Max QPI Bandwidth is 2 * ROUND ( QPI Speed , 0)", "Desc": "QPI Speed", "Equation": "ROUND (( CLOCKTICKS / TSC ) * TSC_SPEED, 0 ) * ( 8 / 1000)", }, # HA: "HA.HITME_INSERTS": { "Box": "HA", "Category": "HA HitME Events", "Equation": "HITME_LOOKUP.ALLOCS - HITME_HIT.ALLOCS", }, "HA.HITME_INVAL": { "Box": "HA", "Category": "HA HitME Events", "Equation": "HITME_HIT.INVALS", }, "HA.PCT_CYCLES_BL_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Defn": "Percentage of time the BL Egress Queue is full", "Desc": "Percent BL Egress Full", "Equation": "TxR_BL_CYCLES_FULL.ALL / SAMPLE_INTERVAL", }, "HA.PCT_CYCLES_D2C_DISABLED": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Defn": "Percentage of time that Direct2Core was disabled.", "Desc": "Percent D2C Disabled", "Equation": "DIRECT2CORE_CYCLES_DISABLED / SAMPLE_INTERVAL", }, "HA.PCT_RD_REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Defn": "Percentage of HA traffic that is from Read Requests", "Desc": "Percent Read Requests", "Equation": "REQUESTS.READS / (REQUESTS.READS + REQUESTS.WRITES)", }, "HA.PCT_WR_REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Defn": "Percentage of HA traffic that is from Write Requests", "Desc": "Percent Write Requests", "Equation": "REQUESTS.WRITES / (REQUESTS.READS + REQUESTS.WRITES)", }, } categories = ( "CBO CACHE Events", "CBO EGRESS Events", "CBO INGRESS Events", "CBO INGRESS_RETRY Events", "CBO MISC Events", "CBO OCCUPANCY Events", "CBO RING Events", "CBO SBO Credit Events", "CBO TOR Events", "CBO UCLK Events", "HA ADDR_OPCODE_MATCH Events", "HA BL_EGRESS Events", "HA BT (Backup Tracker) Events", "HA BYPASS Events", "HA CONFLICTS Events", "HA DIRECT2CORE Events", "HA DIRECTORY Events", "HA EGRESS Events", 
"HA HitME Events", "HA IMC_MISC Events", "HA IMC_READS Events", "HA IMC_WRITES Events", "HA OSB (Opportunistic Snoop Broadcast) Events", "HA OUTBOUND_TX Events", "HA QPI_IGR_CREDITS Events", "HA REQUESTS Events", "HA RING Events", "HA RPQ_CREDITS Events", "HA SBO Credit Events", "HA SNOOPS Events", "HA SNP_RESP Events", "HA TAD Events", "HA TRACKER Events", "HA UCLK Events", "HA WPQ_CREDITS Events", "IRP AK_INGRESS Events", "IRP BL_INGRESS_DRS Events", "IRP BL_INGRESS_NCB Events", "IRP BL_INGRESS_NCS Events", "IRP Coherency Events", "IRP IO_CLKS Events", "IRP MISC Events", "IRP OUTBOUND_REQUESTS Events", "IRP STALL_CYCLES Events", "IRP TRANSACTIONS Events", "IRP WRITE_CACHE Events", "PCU CORE_C_STATE_TRANSITION Events", "PCU FREQ_MAX_LIMIT Events", "PCU FREQ_MIN_LIMIT Events", "PCU FREQ_TRANS Events", "PCU MEMORY_PHASE_SHEDDING Events", "PCU PCLK Events", "PCU PKG_C_STATE_RESIDENCY Events", "PCU POWER_STATE_OCC Events", "PCU PROCHOT Events", "PCU UFS Events", "PCU VR_HOT Events", "QPI_LL CFCLK Events", "QPI_LL CTO Events", "QPI_LL DIRECT2CORE Events", "QPI_LL FLITS_RX Events", "QPI_LL FLITS_TX Events", "QPI_LL POWER Events", "QPI_LL POWER_RX Events", "QPI_LL POWER_TX Events", "QPI_LL R3QPI_EGRESS_CREDITS Events", "QPI_LL RXQ Events", "QPI_LL RX_CREDITS_CONSUMED Events", "QPI_LL TXQ Events", "QPI_LL VNA_CREDIT_RETURN Events", "R2PCIe EGRESS Events", "R2PCIe IIO Credit Events", "R2PCIe INGRESS Events", "R2PCIe RING Events", "R2PCIe SBO Credit Events", "R2PCIe UCLK Events", "R3QPI EGRESS Credit Events", "R3QPI EGRESS Events", "R3QPI INGRESS Events", "R3QPI LINK_VN0_CREDITS Events", "R3QPI LINK_VN1_CREDITS Events", "R3QPI LINK_VNA_CREDITS Events", "R3QPI RING Events", "R3QPI SBO Credit Events", "R3QPI UCLK Events", "SBO EGRESS Events", "SBO INGRESS Events", "SBO RING Events", "SBO UCLK Events", "UBOX EVENT_MSG Events", "UBOX PHOLD Events", "UBOX RACU Events", "iMC ACT Events", "iMC BYPASS Command Events", "iMC CAS Events", "iMC DCLK Events", "iMC DRAM_PRE_ALL Events", 
"iMC DRAM_REFRESH Events", "iMC ECC Events", "iMC MAJOR_MODES Events", "iMC POWER Events", "iMC PRE Events", "iMC PREEMPTION Events", "iMC RPQ Events", "iMC VMSE Events", "iMC WPQ Events", );
724,764
Python
.py
13,955
42.014547
1,158
0.584658
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,957
ucexpr.py
andikleen_pmu-tools/ucevent/ucexpr.py
# Copyright (c) 2013, Intel Corporation
# Author: Andi Kleen
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# do some (not so) simple expression parsing, mainly to handle the
# with:... construct look up the sub events and convert them into a
# function call we also expand sub expressions (references to other
# events), but we need to recurse on () and for sub exprs
# this does not require handling full operator precedence
#
# NOTE(review): this module was recovered from a whitespace-collapsed dump;
# block nesting was reconstructed from context — verify against upstream.
from __future__ import print_function
import re
import inspect
import ucevent
import ucmsg

# Shorthand for the project debug-message hook (first arg is a debug category).
dbg = ucmsg.debug_msg

class ParseError(Exception):
    """Raised on any syntax error while parsing an event equation."""
    def __init__(self, msg):
        self.msg = msg

def is_id(t):
    # Identifier tokens: start with a letter, may contain [], :, dots, digits.
    # NOTE(review): "0.9" in the class looks like a typo for "0-9" — it matches
    # the literal chars 0, ., 9 only; kept as-is (doc-only change).
    return re.match(r'[a-zA-Z][\[\]:a-zA-Z0.9.]*', t)

def is_number(t):
    # Decimal or 0x-prefixed hex literal.
    return re.match(r"(0x)?[0-9]+", t)

# Uncore pseudo-events that map directly onto core perf events.
cpu_events = {
    "INST_RETIRED.ALL": "instructions",
    "TOTAL_CORE_CYCLES": "cycles",
}

def event_expr(v, n):
    # Render an event reference as an EV('perf-event', 'Name') call string,
    # evaluated later by the measurement layer.
    return "EV('%s', '%s')" % (v, n)

def fix_token(t, box, user_mode):
    """Map a raw token to an EV(...) call string if it names an event.

    Non-event tokens (operators, numbers, qualifiers) are returned unchanged.
    Event references to derived events (with an "Equation") are expanded
    recursively.
    """
    # Heuristic: event names are upper-case-ish identifiers with a dot,
    # or start with a known box prefix.
    if ((t.find(".") >= 0 and re.match(r"[A-Z][0-9a-zA-Z_.]{5,}", t)) or
            re.match(r"[A-Z]", t) or
            t.startswith("iMC") or
            t.startswith("R2PCIe")):
        if t in cpu_events:
            return event_expr(cpu_events[t], t)
        if t in ucevent.cpu_aux.alias_events:
            return event_expr(ucevent.cpu_aux.alias_events[t], t)
        if user_mode:
            # In user mode the box may be embedded in the token ("BOX.EVENT").
            m = re.match(r"([^.]+)\.", t)
            if m:
                box = m.group(1)
        name = t
        if not t.startswith(box + "."):
            name = box + "." + t
        ev = ucevent.lookup_event(name)
        if not ev:
            ev = ucevent.lookup_event(t)
        if ev:
            if "Equation" in ev:
                # Derived event: expand its equation in place.
                tl = tokenize(ev["Equation"], box)
                n = expr(tl)
                # NOTE(review): expr() returns a (tokens, rest) pair elsewhere;
                # returning it unflattened here looks suspicious — confirm.
                return n
            evl = ucevent.format_event(ev)
            n = event_expr(evl[0], name)
            if len(evl) > 1:
                # Multi-instance box: mark for later expansion (expand_events).
                n = n.replace("_0/", "_INDEX/")
            return n
    return t

# really poor man's tokenizer
def tokenize(eq, box, user_mode=False):
    # Insert spaces so a plain split() yields one token per operator/qualifier,
    # then run every token through fix_token.
    eq = eq.replace(".{", " .{")
    eq = eq.replace("with:", "with: ")
    eq = eq.replace("(on Core)", "")
    eq = re.sub(r"([*={}(),/<>]|>=|==|<=)", r" \1 ", eq)
    eq = re.sub(r"\.([a-z])", r" . \1", eq)
    # Re-join two-char operators the first substitution split apart.
    eq = re.sub(r"([=<>]) \1", r"\1\1", eq)
    eq = re.sub(r"([<>]) =", r"\1=", eq)
    return list(map(lambda x: fix_token(x, box, user_mode), eq.split()))

# expand event lists to multiple boxes after parsing
# this avoids having to pass this all around the parser
# we assume all are for the same box
def expand_events(s):
    m = re.search(r"uncore_([^/]+)_INDEX", s)
    if not m:
        return [s]
    l = []
    for j in ucevent.find_boxes(m.group(1)):
        # Replace the _INDEX placeholder with each box instance number.
        m = re.search(r'_(\d+)$', j)
        if m:
            l.append(s.replace("_INDEX", "_" + m.group(1)))
    if len(l) == 0:
        return [s]
    return l

def expect(tl, c):
    """Consume the expected token c from the head of tl or raise ParseError."""
    dbg("expect", "expecting %s" % (c))
    if not tl:
        raise ParseError("expected %s, got end" % (c))
    if tl[0] != c:
        raise ParseError("expected %s instead of %s: %s" % (c, tl[0], tl[:5]))
    return tl[1:]

def convert_qual(q, v):
    """Translate a documentation-style qualifier (name or bitfield) to the
    perf qualifier name and shifted value; returns (name, value)."""
    qual_alias = ucevent.cpu_aux.qual_alias
    if q in qual_alias:
        return (qual_alias[q], v)
    # Bitfield form "REG[hi:lo]": shift the value into position.
    m = re.match(r"([A-Za-z0-9_]+)\[(\d+):(\d+)\]", q)
    if m:
        q = m.group(1)
        val = (int(v, 0) << int(m.group(3)))
        v = "%#x" % (val)
        if q in qual_alias:
            return (qual_alias[q], v)
    return (q, v)

def is_ev(l):
    # True if l is a rendered EV(...) event string.
    return isinstance(l, str) and l.startswith("EV(")

def apply_expr(o, fl, vl):
    """Apply qualifier names fl with values vl to event string(s) o,
    recursing into lists. Existing qualifiers of the same name are OR-ed."""
    if is_list(o):
        return list(map(lambda x: apply_expr(x, fl, vl), o))
    if is_ev(o):
        for f, v in zip(fl, vl):
            nn, nv = convert_qual(f, v)
            # existing qualifier to or it to?
            m = re.search(r",%s=([^,/]+)" % (nn), o)
            if m:
                val = int(m.group(1), 0)
                val |= int(nv, 0)
                # NOTE(review): the pattern here is the literal text
                # ",(%s)=..." — %s is not interpolated; likely a latent bug.
                o = re.sub(r",(%s)=([^,/]+)", r"\1=%#x," % (val), o)
            else:
                add = ",%s=%s" % (nn, nv)
                o = o.replace("/'", add + "/'")
    return o

def has_ev(l):
    # True if l is, or (recursively) contains, an EV(...) string.
    if is_list(l):
        for j in l:
            if has_ev(j):
                return True
    elif is_ev(l):
        return True
    return False

def apply_list(o, fl, vl):
    """Apply a with:-clause qualifier list (fl) and value list (vl) to o.

    List entries inside fl are pre-bound "name=value" pairs and are applied
    first, then removed, so the remaining names pair up with vl.
    """
    dbg("expr", "apply %s and %s to %s from %s" % (fl, vl, o, inspect.stack()[1][2:]))
    for j in fl:
        if is_list(j):
            for k in j:
                n = k.split('=')
                o = apply_expr(o, [n[0]], [n[1]])
    fl = list(filter(lambda x: not is_list(x), fl))
    if len(fl) != len(vl):
        print("MISMATCHED APPLY",fl,vl,o,inspect.stack()[1][2:])
        return o
    if not has_ev(o):
        # No event strings: attach the qualifiers textually instead.
        evo = []
        for f, v in zip(fl, vl):
            if is_id(f):
                f = "." + f
            evo.append("%s%s=%s" % (o, f, v))
        return evo
    return apply_expr(o, fl, vl)

# Matching close bracket for each opener.
closener = { "{": "}", "(": ")" }

# a = NUMBER
# a
# NUMBER
# [...] (???)
# a { list }
def parse_term(tl):
    """Parse one term of a qualifier list; returns (name, value-or-None, rest)."""
    name = tl[0]
    if not (is_id(name) or is_number(name) or name[0] == '['):
        raise ParseError("expected identifier or number in list not %s: %s" % (tl[0], tl[:5]))
    tl = tl[1:]
    if tl[0] == '.':
        # Dotted name "a.b".
        tl = tl[1:]
        name = name + "." + tl[0]
        tl = tl[1:]
    if tl[0] == '=':
        tl = tl[1:]
        if not is_number(tl[0]):
            raise ParseError("expected number after =, not %s" % (tl[0]))
        return name, tl[0], tl[1:]
    if tl[0] == '{':
        # Nested list "{...}" optionally followed by "= (...)" values.
        fl, vl, tl = parse_list(tl[1:])
        tl = expect(tl, '}')
        if tl[0] == '=':
            tl = tl[1:]
            if tl[0] not in ('(', '{'):
                raise ParseError("expect ( or {, not %s" % (tl[:5]))
            opener = tl[0]
            vl, _, tl = parse_list(tl[1:])
            tl = expect(tl, closener[opener])
        name = apply_list(name, fl, vl)
    return name, None, tl

# term { , term }
def parse_list(tl):
    """Parse a comma-separated term list; returns (names, values, rest)."""
    ls = []
    nm = []
    while True:
        term, nmt, tl = parse_term(tl)
        ls.append(term)
        if nmt is not None:
            nm.append(nmt)
        if tl[0] != ',':
            break
        tl = tl[1:]
    return ls, nm, tl

# '{' list '}'
# '{' list '}' = ( val_list)
# a = NUMBER
# a . '{' list '}' = ( val_list )
# a . x = NUMBER
def parse_with(tl, orig):
    """Parse the body of a with:-clause and apply it to event(s) orig.

    Returns (modified orig, remaining tokens).
    """
    #print("with",tl)
    if tl[0] == '{':
        # with:{a,b,...} or with:{a,b}=(v1,v2)
        ls, nm, tl = parse_list(tl[1:])
        vl = []
        tl = expect(tl, '}')
        if tl and tl[0] == '=':
            tl = tl[1:]
            if tl[0] in ('(', '{'):
                opener = tl[0]
                nm, _, tl = parse_list(tl[1:])
                tl = expect(tl, closener[opener])
            else:
                raise ParseError("expected { or (, not %s" % (tl[:5]))
        orig = apply_list(orig, ls, nm)
        return orig, tl
    # id
    if is_id(tl[0]) or tl[0][0] == '[':
        idn = tl[0]
        tl = tl[1:]
        if tl[0] == '.':
            tl = tl[1:]
            # id . id
            if is_id(tl[0]):
                id2 = tl[0]
                tl = tl[1:]
                # id . id = NUMBER
                if tl[0] == '=':
                    tl = tl[1:]
                    if not is_number(tl[0]):
                        raise ParseError("expecting number after =, not %s" % (tl[:5]))
                    number = tl[0]
                    tl = tl[1:]
                else:
                    number = "1"
                orig = apply_list(orig, [idn + "." + id2], [number])
                return orig, tl
            # .{ list } = { list }
            tl = expect(tl, '{')
            ls, vl, tl = parse_list(tl)
            tl = expect(tl, '}')
            if tl[0] == '=':
                tl = tl[1:]
                if tl[0] == '(' or tl[0] == '{':
                    opener = tl[0]
                    vl, _, tl = parse_list(tl[1:])
                    tl = expect(tl, closener[opener])
            orig = apply_list(orig, ls, vl)
            return orig, tl
        # Plain "id = NUMBER".
        tl = expect(tl, '=')
        num = tl[0]
        if not is_number(num):
            raise ParseError("expected number, not %s" % (num))
        orig = apply_list(orig, [idn], [num])
        return orig, tl[1:]
    elif tl[0] == '.':
        tl = tl[1:]
        if is_id(tl[0]):
            # id . a = NUMBER
            vl = [tl[0]]
            tl = tl[1:]
            if tl[0] == '=':
                tl = tl[1:]
                num = tl[0]
                if not is_number(num):
                    raise ParseError("Expected number, not %s" % (tl[:5]))
            else:
                num = 1
            vl = [num]
            # NOTE(review): 'fl' is not assigned on this path before the
            # apply_list call below — looks like a latent NameError; verify
            # against upstream sources.
        else:
            tl = expect(tl, '{')
            fl, _, tl = parse_list(tl)
            tl = expect(tl, '}')
            tl = expect(tl, '=')
            tl = expect(tl, '(')
            vl, _, tl = parse_list(tl)
            tl = expect(tl, ')')
        orig = apply_list(orig, fl, vl)
        return orig, tl
    raise ParseError("parse error in with clause at %s (%s)" % (tl[0], tl))
    return [], []

# Binary operators passed through to the output expression verbatim.
operators = ("+", "-", "*", "/", ">", "<", ">=", "<=", "==", "<<", ">>")

def expr_term(tl):
    """Parse one operand: parenthesised sub-expression or single token,
    each optionally followed by a {qualifier list} and/or a with:-clause."""
    if tl[0] == '(':
        out, tl = expr(tl[1:])
        if isinstance(out, list):
            out = [out]
        tl = expect(tl, ')')
    else:
        out = [tl[0]]
        tl = tl[1:]
        if tl and tl[0] == '{':
            fl, vl, tl = parse_list(tl[1:])
            tl = expect(tl, '}')
            out = apply_list(out, fl, vl)
    if not tl:
        return out, tl
    if tl and tl[0] == 'with:':
        out, tl = parse_with(tl[1:], out)
    return out, tl

def expr(tl):
    """Parse "term (op term)*" with no precedence; returns (tokens, rest)."""
    out = []
    while True:
        no, tl = expr_term(tl)
        out += no
        if tl and tl[0] in operators:
            out.append(tl[0])
            tl = tl[1:]
        else:
            break
    return out, tl

def is_list(e):
    return isinstance(e, list) or isinstance(e, tuple)

def expr_flat(e):
    """Flatten the nested token tree into a flat list, re-inserting
    parentheses around each non-empty sub-list."""
    if not is_list(e):
        return e
    l = []
    for i in e:
        s = expr_flat(i)
        if is_list(s):
            if len(s) > 0:
                l.append('(')
                l += s
                l.append(')')
        else:
            l.append(s)
    return l

def apply_one_user_qual(x, qual):
    # Attach a user-supplied qualifier to an event string, except for
    # clocktick reference events which must stay unqualified.
    if not x.startswith("EV("):
        return x
    if any(map(lambda r: re.search(r, x), ucevent.cpu_aux.clockticks)):
        return x
    return x.replace("/'", "," + qual + "/'")

def apply_user_qual(e, qual):
    return list(map(lambda x: apply_one_user_qual(x, qual), e))

def parse(s, box, quiet=False, user_mode=False, qual=None):
    """Top-level entry: parse equation string s for box.

    Returns a list of expanded expression strings (one per box instance),
    or [] on parse error (the error is printed, not raised).
    """
    try:
        if not quiet:
            print("Expression", s)
        tl = tokenize(s, box, user_mode)
        dbg("tokenize", tl)
        e, tl = expr(tl)
        if len(tl) > 0:
            raise ParseError("unexpected token %s at end" % (tl[0]))
        dbg("expr", e)
        eflat = expr_flat(e)
        if qual:
            eflat = apply_user_qual(eflat, qual)
        res = " ".join(eflat)
        res = expand_events(res)
        dbg("expanded", tl)
        return res
    except ParseError as p:
        print("PARSE-ERROR", p.msg, s)
        return []

if __name__ == '__main__':
    assert is_id("x")
    print(parse("a + b + c", "foo"))
    print(parse("a with:x=1 + (b with:.{o}=(1) + c with:{edge}) with:{x=1,y=2} + d", "foo"))
11,817
Python
.py
367
23.542234
92
0.464927
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,958
md2hman.py
andikleen_pmu-tools/ucevent/md2hman.py
#!/usr/bin/env python # convert README.md to include files for help2man from __future__ import print_function import sys import re skip_sections = ( "Command Line options reference", "Debugging and testing", "Support", "Author", "Other Projects providing uncore monitoring") tabmode = False skip = False for l in sys.stdin: after = "" l = l.rstrip() if l and l[0] == '#': skip = False if l[2:] in skip_sections: skip = True continue print("[%s]" % (l[2:])) continue elif l == "" and not skip: print(".PP") tabmode = False continue if skip: continue if l and l[0] == '\t' and l[1:]: if not tabmode: print(".nf\n.sp") tabmode = True #print(".I ", end=" ") elif tabmode: after = ".fi" tabmode = False if l and l[0] == '-': print(".TP") l = l[2:] if l and l[0:2] == "**": print(".B ", end=" ") l = l.replace("**","") if l and l[0] == '[': m = re.match(r"\[(.*)\]\s*\((.*)\)(.*)", l) #l = '.URL "%s" "%s"\n%s' % (m.group(2), m.group(1), m.group(3)) l = m.group(2) + " " + m.group(1) + " " + m.group(3) print(l) if after: print(after)
1,297
Python
.py
46
21.369565
87
0.484388
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,959
sanity-run.py
andikleen_pmu-tools/ucevent/sanity-run.py
#!/usr/bin/env python # sanity check an event # percent between 0 and 100% # nothing negative from __future__ import print_function import sys import os import pipes logfile = "slog.%d" % (os.getpid()) s = "./ucevent.py -x, -o " + logfile + " " + " ".join(map(pipes.quote, sys.argv[1:])) w = os.getenv("WRAP") if w: s = w + " " + s print(s) r = os.system(s) if r != 0: print("ucevent failed", r) sys.exit(1) f = open(logfile, "r") fields = f.readline().strip().split(",") for l in f: vals = l.strip().split(",") for v, h in zip(vals, fields): if fields == "timestamp": continue try: num = float(v) except ValueError: print(h,v) continue if num < 0: print(h,"negative value",v) if h.find("_PCT") >= 0 or h.find("PCT_") >= 0: if num < 0 or num > 1.01: print(h,"percent out of bound", v) os.remove(logfile)
957
Python
.py
36
21.333333
85
0.55131
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,960
ivt_uc.py
andikleen_pmu-tools/ucevent/ivt_uc.py
# IVT ivtuc_events.v0.85p ivtuc_derived.v0.85p # aliases aliases = { "HA_OpcodeMatch": "HAn_PCI_PMON_BOX_OPCODEMATCH", "QPIMatch1": "Q_Py_PCI_PMON_BOX_MATCH1", "QPIMask0": "Q_Py_PCI_PMON_BOX_MASK0", "HA_AddrMatch1": "HAn_PCI_PMON_BOX_ADDRMATCH1", "QPIMatch0": "Q_Py_PCI_PMON_BOX_MATCH0", "PCUFilter": "PCU_MSR_PMON_BOX_FILTER", "IRPFilter": "IRP_PCI_PMON_BOX_FILTER", "UBoxFilter": "U_MSR_PMON_BOX_FILTER", "HA_AddrMatch0": "HAn_PCI_PMON_BOX_ADDRMATCH0", "CBoFilter0": "Cn_MSR_PMON_BOX_FILTER", "CBoFilter1": "Cn_MSR_PMON_BOX_FILTER1", "QPIMask1": "Q_Py_PCI_PMON_BOX_MASK1", } events = { # R3QPI: "R3QPI.CLOCKTICKS": { "Box": "R3QPI", "Category": "R3QPI UCLK Events", "Counters": "0-2", "Defn": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Number of uclks in domain", "EvSel": 1, "ExtSel": "", }, "R3QPI.C_HI_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 44, "ExtSel": "", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO14": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 44, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO9": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO13": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits 
available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 44, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO11": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 44, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO12": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 44, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO10": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.C_HI_AD_CREDITS_EMPTY.CBO8": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.C_LO_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 43, "ExtSel": "", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO7": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 43, "ExtSel": "", "Umask": "b1xxxxxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO0": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to 
Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 43, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO3": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 43, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO4": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 43, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO5": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 43, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO2": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 43, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO6": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 43, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.C_LO_AD_CREDITS_EMPTY.CBO1": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 43, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either 
HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 47, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.HA1": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 47, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxxxx1x", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.R2_NCS": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 47, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxx1xxx", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.R2_NCB": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 47, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered CBoxes (8-13,15+17,16+18)", "Umask": "bxxxxx1xx", }, "R3QPI.HA_R2_BL_CREDITS_EMPTY.HA0": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to either HA or R2 on the BL Ring", "Desc": "HA/R2 AD Credits Empty", "EvSel": 47, "ExtSel": "", "Notes": "Counter 0 counts lack of credits to the lesser numbered Cboxes (0-8) Counter 1 counts lack of credits to Cbox to the higher numbered 
CBoxes (8-13,15+17,16+18)", "Umask": "bxxxxxxx1", }, "R3QPI.QPI0_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 41, "ExtSel": "", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 41, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 41, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 41, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN0_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 41, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN0_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.QPI0_AD_CREDITS_EMPTY.VN0_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", 
"Defn": "No credits available to send to QPI0 on the AD Ring", "Desc": "QPI0 AD Credits Empty", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.QPI0_BL_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 45, "ExtSel": "", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 45, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 45, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 45, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN0_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 45, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.QPI0_BL_CREDITS_EMPTY.VN0_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxx1xx", }, 
"R3QPI.QPI0_BL_CREDITS_EMPTY.VN0_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI0 on the BL Ring", "Desc": "QPI0 BL Credits Empty", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.QPI1_AD_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 42, "ExtSel": "", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 42, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 42, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 42, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN0_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 42, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN0_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the 
AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.QPI1_AD_CREDITS_EMPTY.VN0_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the AD Ring", "Desc": "QPI1 AD Credits Empty", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.QPI1_BL_CREDITS_EMPTY": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 46, "ExtSel": "", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN1_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VNA": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN1_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bx1xxxxxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN1_SNP": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN0_NDR": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN0_SNP": { "Box": "R3QPI", "Category": "R3QPI 
EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.QPI1_BL_CREDITS_EMPTY.VN0_HOM": { "Box": "R3QPI", "Category": "R3QPI EGRESS Credit Events", "Counters": "0-1", "Defn": "No credits available to send to QPI1 on the BL Ring", "Desc": "QPI1 BL Credits Empty", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RING_AD_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R3QPI.RING_AD_USED.CW_VR0_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R3QPI.RING_AD_USED.CCW_VR0_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R3QPI.RING_AD_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b11001100", }, "R3QPI.RING_AD_USED.CCW_VR0_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R3QPI.RING_AD_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00110011", }, "R3QPI.RING_AD_USED.CW_VR0_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R3QPI.RING_AK_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R3QPI.RING_AK_USED.CW_VR0_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R3QPI.RING_AK_USED.CCW_VR0_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R3QPI.RING_AK_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b11001100", }, "R3QPI.RING_AK_USED.CCW_VR0_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R3QPI.RING_AK_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00110011", }, "R3QPI.RING_AK_USED.CW_VR0_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R3QPI.RING_BL_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R3QPI.RING_BL_USED.CW_VR0_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R3QPI.RING_BL_USED.CCW_VR0_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R3QPI.RING_BL_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b11001100", }, "R3QPI.RING_BL_USED.CCW_VR0_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R3QPI.RING_BL_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00110011", }, "R3QPI.RING_BL_USED.CW_VR0_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R3QPI.RING_IV_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", }, "R3QPI.RING_IV_USED.ANY": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Umask": "b11111111", }, "R3QPI.RING_IV_USED.CCW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. 
The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Umask": "b11001100", }, "R3QPI.RING_IV_USED.CW": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Umask": "b00110011", }, "R3QPI.RxR_AD_BYPASSED": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of times when the AD Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.", "Desc": "AD Ingress Bypassed", "EvSel": 18, "ExtSel": "", }, "R3QPI.RxR_CYCLES_NE": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", }, "R3QPI.RxR_CYCLES_NE.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RxR_CYCLES_NE.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.RxR_CYCLES_NE.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.RxR_INSERTS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", }, "R3QPI.RxR_INSERTS.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.RxR_INSERTS.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.RxR_INSERTS.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.RxR_INSERTS.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.RxR_INSERTS.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.RxR_INSERTS.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.RxR_OCCUPANCY": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, }, "R3QPI.RxR_OCCUPANCY.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, "Umask": "b00100000", }, "R3QPI.RxR_OCCUPANCY.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, "Umask": "b00010000", }, "R3QPI.RxR_OCCUPANCY.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, "Umask": "b00001000", }, "R3QPI.RxR_OCCUPANCY.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, "Umask": "b00000010", }, "R3QPI.RxR_OCCUPANCY.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, "Umask": "b00000001", }, "R3QPI.RxR_OCCUPANCY.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, "Umask": "b00000100", }, "R3QPI.TxR_CYCLES_FULL": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", }, "R3QPI.TxR_CYCLES_NE": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Egress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", }, "R3QPI.TxR_NACK_CCW": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress NACK", "EvSel": 40, "ExtSel": "", }, "R3QPI.TxR_NACK_CCW.AK": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress NACK", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.TxR_NACK_CCW.BL": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress NACK", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.TxR_NACK_CCW.AD": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress NACK", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.TxR_NACK_CW": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress NACK", "EvSel": 38, "ExtSel": "", }, "R3QPI.TxR_NACK_CW.AK": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress 
NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.TxR_NACK_CW.BL": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.TxR_NACK_CW.AD": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN0_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", }, "R3QPI.VN0_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN0_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN0_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN0_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN0_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN0_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN0_CREDITS_USED": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", }, "R3QPI.VN0_CREDITS_USED.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN0_CREDITS_USED.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN0_CREDITS_USED.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN0_CREDITS_USED.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN0_CREDITS_USED.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN0_CREDITS_USED.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN1_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", }, "R3QPI.VN1_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN1_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN1_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN1_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN1_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN1_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.", "Desc": "VN1 Credit Acquisition Failed on DRS", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VN1_CREDITS_USED": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", }, "R3QPI.VN1_CREDITS_USED.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VN1_CREDITS_USED.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VN1_CREDITS_USED.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VN1_CREDITS_USED.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VN1_CREDITS_USED.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VN1_CREDITS_USED.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN1_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VNA_CREDITS_ACQUIRED": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transfered in a given message class using an qfclk event.", "Desc": "VNA credit Acquisitions", "EvSel": 51, "ExtSel": "", "MaxIncCyc": 4, }, "R3QPI.VNA_CREDITS_ACQUIRED.BL": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. 
One can count the number of packets transfered in a given message class using an qfclk event.", "Desc": "VNA credit Acquisitions", "EvSel": 51, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "R3QPI.VNA_CREDITS_ACQUIRED.AD": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transfered in a given message class using an qfclk event.", "Desc": "VNA credit Acquisitions", "EvSel": 51, "ExtSel": "", "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "R3QPI.VNA_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", }, "R3QPI.VNA_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R3QPI.VNA_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R3QPI.VNA_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxx1xxx", }, "R3QPI.VNA_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R3QPI.VNA_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R3QPI.VNA_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R3QPI.VNA_CREDIT_CYCLES_OUT": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI uclk cycles when the transmitted has no VNA credits available and therefore cannot send any requests on this channel. Note that this does not mean that no flits can be transmitted, as those holding VN0 credits will still (potentially) be able to transmit. Generally it is the goal of the uncore that VNA credits should not run out, as this can substantially throttle back useful QPI bandwidth.", "Desc": "Cycles with no VNA credits available", "EvSel": 49, "ExtSel": "", }, "R3QPI.VNA_CREDIT_CYCLES_USED": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI uclk cycles with one or more VNA credits in use. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average number of used VNA credits.", "Desc": "Cycles with 1 or more VNA credits in use", "EvSel": 50, "ExtSel": "", }, # IRP: "IRP.ADDRESS_MATCH": { "Box": "IRP", "Category": "IRP ADDRESS_MATCH Events", "Counters": "0-1", "Defn": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.", "Desc": "Address Match (Conflict) Count", "EvSel": 23, "ExtSel": "", }, "IRP.ADDRESS_MATCH.MERGE_COUNT": { "Box": "IRP", "Category": "IRP ADDRESS_MATCH Events", "Counters": "0-1", "Defn": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.", "Desc": "Address Match (Conflict) Count", "EvSel": 23, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IRP.ADDRESS_MATCH.STALL_COUNT": { "Box": "IRP", "Category": "IRP ADDRESS_MATCH Events", "Counters": "0-1", "Defn": 
"Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.", "Desc": "Address Match (Conflict) Count", "EvSel": 23, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IRP.CACHE_ACK_PENDING_OCCUPANCY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.", "Desc": "Write Ack Pending Occupancy", "EvSel": 20, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "IRP.CACHE_ACK_PENDING_OCCUPANCY.ANY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.", "Desc": "Write Ack Pending Occupancy", "EvSel": 20, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000001", }, "IRP.CACHE_ACK_PENDING_OCCUPANCY.SOURCE": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. 
The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.", "Desc": "Write Ack Pending Occupancy", "EvSel": 20, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000010", }, "IRP.CACHE_OWN_OCCUPANCY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.", "Desc": "Outstanding Write Ownership Occupancy", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "IRP.CACHE_OWN_OCCUPANCY.ANY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.", "Desc": "Outstanding Write Ownership Occupancy", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000001", }, "IRP.CACHE_OWN_OCCUPANCY.SOURCE": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. 
The occupancy increments when a write request is issued, and decrements when the data is returned.", "Desc": "Outstanding Write Ownership Occupancy", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000010", }, "IRP.CACHE_READ_OCCUPANCY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.", "Desc": "Outstanding Read Occupancy", "EvSel": 16, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "IRP.CACHE_READ_OCCUPANCY.ANY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.", "Desc": "Outstanding Read Occupancy", "EvSel": 16, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000001", }, "IRP.CACHE_READ_OCCUPANCY.SOURCE": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.", "Desc": "Outstanding Read Occupancy", "EvSel": 16, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000010", }, "IRP.CACHE_TOTAL_OCCUPANCY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. 
This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "IRP.CACHE_TOTAL_OCCUPANCY.ANY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000001", }, "IRP.CACHE_TOTAL_OCCUPANCY.SOURCE": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 18, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000010", }, "IRP.CACHE_WRITE_OCCUPANCY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.", "Desc": "Outstanding Write Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "IRP.CACHE_WRITE_OCCUPANCY.ANY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. 
The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.", "Desc": "Outstanding Write Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000001", }, "IRP.CACHE_WRITE_OCCUPANCY.SOURCE": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.", "Desc": "Outstanding Write Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "b00000010", }, "IRP.CLOCKTICKS": { "Box": "IRP", "Category": "IRP IO_CLKS Events", "Counters": "0-1", "Defn": "Number of clocks in the IRP.", "Desc": "Clocks in the IRP", "EvSel": 0, "ExtSel": "", }, "IRP.RxR_AK_CYCLES_FULL": { "Box": "IRP", "Category": "IRP AK_INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the AK Ingress is full. This queue is where the IRP receives responses from R2PCIe (the ring).", "EvSel": 11, "ExtSel": "", }, "IRP.RxR_AK_INSERTS": { "Box": "IRP", "Category": "IRP AK_INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).", "Desc": "AK Ingress Occupancy", "EvSel": 10, "ExtSel": "", }, "IRP.RxR_AK_OCCUPANCY": { "Box": "IRP", "Category": "IRP AK_INGRESS Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the AK Ingress in each cycles. 
This queue is where the IRP receives responses from R2PCIe (the ring).", "EvSel": 12, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.RxR_BL_DRS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 4, "ExtSel": "", }, "IRP.RxR_BL_DRS_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - DRS", "EvSel": 1, "ExtSel": "", }, "IRP.RxR_BL_DRS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_DRS Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 7, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.RxR_BL_NCB_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 5, "ExtSel": "", }, "IRP.RxR_BL_NCB_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). 
It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - NCB", "EvSel": 2, "ExtSel": "", }, "IRP.RxR_BL_NCB_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCB Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 8, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.RxR_BL_NCS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 6, "ExtSel": "", }, "IRP.RxR_BL_NCS_INSERTS": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "Desc": "BL Ingress Occupancy - NCS", "EvSel": 3, "ExtSel": "", }, "IRP.RxR_BL_NCS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL_INGRESS_NCS Events", "Counters": "0-1", "Defn": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.", "EvSel": 9, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "IRP.TICKLES": { "Box": "IRP", "Category": "IRP TICKLES Events", "Counters": "0-1", "Defn": "Counts the number of tickles that are received. 
This is for both explicit (from Cbo) and implicit (internal conflict) tickles.", "Desc": "Tickle Count", "EvSel": 22, "ExtSel": "", }, "IRP.TICKLES.TOP_OF_QUEUE": { "Box": "IRP", "Category": "IRP TICKLES Events", "Counters": "0-1", "Defn": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.", "Desc": "Tickle Count", "EvSel": 22, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IRP.TICKLES.LOST_OWNERSHIP": { "Box": "IRP", "Category": "IRP TICKLES Events", "Counters": "0-1", "Defn": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.", "Desc": "Tickle Count", "EvSel": 22, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IRP.TRANSACTIONS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 21, "ExtSel": "", }, "IRP.TRANSACTIONS.ORDERINGQ": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 21, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IRP.TRANSACTIONS.RD_PREFETCHES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. 
This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IRP.TRANSACTIONS.READS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IRP.TRANSACTIONS.WRITES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. 
If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IRP.TxR_AD_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.", "Desc": "No AD Egress Credit Stalls", "EvSel": 24, "ExtSel": "", }, "IRP.TxR_BL_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.", "Desc": "No BL Egress Credit Stalls", "EvSel": 25, "ExtSel": "", }, "IRP.TxR_DATA_INSERTS_NCB": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 14, "ExtSel": "", }, "IRP.TxR_DATA_INSERTS_NCS": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 15, "ExtSel": "", }, "IRP.TxR_REQUEST_OCCUPANCY": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). 
This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.", "Desc": "Outbound Request Queue Occupancy", "EvSel": 13, "ExtSel": "", "SubCtr": 1, }, "IRP.WRITE_ORDERING_STALL_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number of cycles when there are pending write ACK's in the switch but the switch->IRP pipeline is not utilized.", "Desc": "Write Ordering Stalls", "EvSel": 26, "ExtSel": "", "Notes": "How are we handling this? What are we doing it we have outstanding ACKs but are using the Switch->IRP path for something else (like new requests?)", }, # CBO: "CBO.CLOCKTICKS": { "Box": "CBO", "Category": "CBO UCLK Events", "Counters": "0-3", "Desc": "Uncore Clocks", "EvSel": 0, "ExtSel": "", }, "CBO.COUNTER0_OCCUPANCY": { "Box": "CBO", "Category": "CBO OCCUPANCY Events", "Counters": "1-3", "Defn": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.", "Desc": "Counter 0 Occupancy", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.LLC_LOOKUP": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. 
CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", }, "CBO.LLC_LOOKUP.DATA_READ": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00000011", }, "CBO.LLC_LOOKUP.REMOTE_SNOOP": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. 
This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00001001", }, "CBO.LLC_LOOKUP.WRITE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. 
To monitor any lookup, set the field to 0x1F.", "Umask": "b00000101", }, "CBO.LLC_LOOKUP.ANY": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00010001", }, "CBO.LLC_LOOKUP.NID": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. 
The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b01000001", }, "CBO.LLC_VICTIMS": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", }, "CBO.LLC_VICTIMS.MISS": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.LLC_VICTIMS.NID": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.LLC_VICTIMS.S_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.LLC_VICTIMS.E_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.LLC_VICTIMS.M_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.MISC": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-1", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", }, "CBO.MISC.RFO_HIT_S": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-1", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.MISC.RSPI_WAS_FSE": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-1", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.MISC.STARTED": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-1", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.MISC.WC_ALIASING": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-1", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RING_AD_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", }, "CBO.RING_AD_USED.UP_VR0_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxxxxxx1", }, "CBO.RING_AD_USED.DOWN_VR0_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxxxx1xx", }, "CBO.RING_AD_USED.CCW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. 
T", "Umask": "b00001100", }, "CBO.RING_AD_USED.UP": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b00110011", }, "CBO.RING_AD_USED.DOWN_VR1_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b1xxxxxxx", }, "CBO.RING_AD_USED.UP_VR0_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxxxxx1x", }, "CBO.RING_AD_USED.DOWN_VR1_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. 
We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bx1xxxxxx", }, "CBO.RING_AD_USED.UP_VR1_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. 
T", "Umask": "bxx1xxxxx", }, "CBO.RING_AD_USED.DOWN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b11001100", }, "CBO.RING_AD_USED.CW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b00000011", }, "CBO.RING_AD_USED.UP_VR1_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxx1xxxx", }, "CBO.RING_AD_USED.DOWN_VR0_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. 
We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxxx1xxx", }, "CBO.RING_AK_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. 
T", }, "CBO.RING_AK_USED.UP_VR0_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxxxxxx1", }, "CBO.RING_AK_USED.DOWN_VR0_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxxxx1xx", }, "CBO.RING_AK_USED.CCW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b00001100", }, "CBO.RING_AK_USED.UP": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b00110011", }, "CBO.RING_AK_USED.DOWN_VR1_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. 
T", "Umask": "b1xxxxxxx", }, "CBO.RING_AK_USED.UP_VR0_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxxxxx1x", }, "CBO.RING_AK_USED.DOWN_VR1_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bx1xxxxxx", }, "CBO.RING_AK_USED.UP_VR1_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxx1xxxxx", }, "CBO.RING_AK_USED.DOWN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b11001100", }, "CBO.RING_AK_USED.CW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. 
T", "Umask": "b00000011", }, "CBO.RING_AK_USED.UP_VR1_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxx1xxxx", }, "CBO.RING_AK_USED.DOWN_VR0_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxxx1xxx", }, "CBO.RING_BL_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", }, "CBO.RING_BL_USED.UP_VR0_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxxxxxx1", }, "CBO.RING_BL_USED.DOWN_VR0_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. 
T", "Umask": "bxxxxx1xx", }, "CBO.RING_BL_USED.CCW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b00001100", }, "CBO.RING_BL_USED.UP": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b00110011", }, "CBO.RING_BL_USED.DOWN_VR1_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b1xxxxxxx", }, "CBO.RING_BL_USED.UP_VR0_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxxxxx1x", }, "CBO.RING_BL_USED.DOWN_VR1_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). 
In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bx1xxxxxx", }, "CBO.RING_BL_USED.UP_VR1_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxx1xxxxx", }, "CBO.RING_BL_USED.DOWN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b11001100", }, "CBO.RING_BL_USED.CW": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "b00000011", }, "CBO.RING_BL_USED.UP_VR1_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. T", "Umask": "bxxx1xxxx", }, "CBO.RING_BL_USED.DOWN_VR0_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) UP_EVEN is actually UP_VR0_EVEN+UP_VR1_EVEN (similarly for ODD/DN). In any cycle, a ring stop can see up to one packet moving in the UP direction and one packet moving in the DN direction. 
T", "Umask": "bxxxx1xxx", }, "CBO.RING_BOUNCES": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", }, "CBO.RING_BOUNCES.AD_IRQ": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RING_BOUNCES.IV": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RING_BOUNCES.AK": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RING_BOUNCES.BL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RING_IV_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "IV Ring in Use", "EvSel": 30, "ExtSel": "", }, "CBO.RING_IV_USED.UP": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "IV Ring in Use", "EvSel": 30, "ExtSel": "", "Umask": "b00110011", }, "CBO.RING_IV_USED.ANY": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "IV Ring in Use", "EvSel": 30, "ExtSel": "", "Umask": "b00001111", }, "CBO.RING_IV_USED.DOWN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "IV Ring in Use", "EvSel": 30, "ExtSel": "", "Umask": "b11001100", }, "CBO.RING_SRC_THRTL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "EvSel": 7, "ExtSel": "", }, "CBO.RxR_EXT_STARVED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", }, "CBO.RxR_EXT_STARVED.PRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_EXT_STARVED.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_EXT_STARVED.ISMQ_BIDS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts cycles in external starvation. 
This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_EXT_STARVED.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_INSERTS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJ should not be Ored with the other umasks.", }, "CBO.RxR_INSERTS.VFIFO": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJ should not be Ored with the other umasks.", "Umask": "bxxx1xxxx", }, "CBO.RxR_INSERTS.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJ should not be Ored with the other umasks.", "Umask": "bxxxxx1xx", }, "CBO.RxR_INSERTS.IRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJ should not be Ored with the other umasks.", "Umask": "bxxxxxx1x", }, "CBO.RxR_INSERTS.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "ExtSel": "", 
"Notes": "IRQ_REJ should not be Ored with the other umasks.", "Umask": "bxxxxxxx1", }, "CBO.RxR_IPQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", }, "CBO.RxR_IPQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_IPQ_RETRY.ADDR_CONFLICT": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_IPQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IPQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a snoop (probe) request had to retry. 
Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_IRQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", }, "CBO.RxR_IRQ_RETRY.RTID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_IRQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_IRQ_RETRY.ADDR_CONFLICT": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.RxR_IRQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY.IIO_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.RxR_IRQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_ISMQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", }, "CBO.RxR_ISMQ_RETRY.WB_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CBO.RxR_ISMQ_RETRY.RTID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.RxR_ISMQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.RxR_ISMQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.RxR_ISMQ_RETRY.IIO_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.RxR_ISMQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.RxR_OCCUPANCY": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJ should not be Ored with the other umasks.", "SubCtr": 1, }, "CBO.RxR_OCCUPANCY.VFIFO": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJ should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00010000", }, "CBO.RxR_OCCUPANCY.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJ 
should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000100", }, "CBO.RxR_OCCUPANCY.IRQ_REJ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJ should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000010", }, "CBO.RxR_OCCUPANCY.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "ExtSel": "", "MaxIncCyc": 20, "Notes": "IRQ_REJ should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "b00000001", }, "CBO.TOR_INSERTS": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", }, "CBO.TOR_INSERTS.MISS_LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00100011", }, "CBO.TOR_INSERTS.NID_EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000100", }, "CBO.TOR_INSERTS.LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00100001", }, "CBO.TOR_INSERTS.EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000100", }, "CBO.TOR_INSERTS.MISS_REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10001010", }, "CBO.TOR_INSERTS.LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00101000", }, "CBO.TOR_INSERTS.MISS_REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10000011", }, "CBO.TOR_INSERTS.ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00001000", }, "CBO.TOR_INSERTS.MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000011", }, "CBO.TOR_INSERTS.NID_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000001", }, "CBO.TOR_INSERTS.NID_MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01001010", }, "CBO.TOR_INSERTS.NID_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01001000", }, "CBO.TOR_INSERTS.REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10001000", }, "CBO.TOR_INSERTS.NID_MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01000011", }, "CBO.TOR_INSERTS.MISS_LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00101010", }, "CBO.TOR_INSERTS.REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b10000001", }, "CBO.TOR_INSERTS.WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00010000", }, "CBO.TOR_INSERTS.OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b00000001", }, "CBO.TOR_INSERTS.NID_WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Umask": "b01010000", }, "CBO.TOR_OCCUPANCY": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.TOR_OCCUPANCY.MISS_LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00100011", }, "CBO.TOR_OCCUPANCY.NID_EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000100", }, "CBO.TOR_OCCUPANCY.LOCAL_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00100001", }, "CBO.TOR_OCCUPANCY.EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000100", }, "CBO.TOR_OCCUPANCY.MISS_REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10001010", }, "CBO.TOR_OCCUPANCY.MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00001010", }, "CBO.TOR_OCCUPANCY.LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00101000", }, "CBO.TOR_OCCUPANCY.MISS_REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10000011", }, "CBO.TOR_OCCUPANCY.ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00001000", }, "CBO.TOR_OCCUPANCY.MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000011", }, "CBO.TOR_OCCUPANCY.NID_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000001", }, "CBO.TOR_OCCUPANCY.NID_MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01001010", }, "CBO.TOR_OCCUPANCY.NID_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01001000", }, "CBO.TOR_OCCUPANCY.REMOTE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10001000", }, "CBO.TOR_OCCUPANCY.NID_MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000011", }, "CBO.TOR_OCCUPANCY.MISS_LOCAL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00101010", }, "CBO.TOR_OCCUPANCY.WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00010000", }, "CBO.TOR_OCCUPANCY.REMOTE_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b10000001", }, "CBO.TOR_OCCUPANCY.NID_WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01010000", }, "CBO.TOR_OCCUPANCY.OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000001", }, "CBO.TxR_ADS_USED": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "EvSel": 4, "ExtSel": "", }, "CBO.TxR_ADS_USED.AK": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.TxR_ADS_USED.BL": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.TxR_ADS_USED.AD": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CBO.TxR_INSERTS": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", }, "CBO.TxR_INSERTS.BL_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CBO.TxR_INSERTS.AK_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CBO.TxR_INSERTS.AD_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CBO.TxR_INSERTS.IV_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CBO.TxR_INSERTS.BL_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CBO.TxR_INSERTS.AK_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CBO.TxR_INSERTS.AD_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, # HA: "HA.ADDR_OPC_MATCH": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", }, "HA.ADDR_OPC_MATCH.FILT": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "b00000011", }, "HA.ADDR_OPC_MATCH.AK": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.ADDR_OPC_MATCH.BL": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.ADDR_OPC_MATCH.OPC": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.ADDR_OPC_MATCH.ADDR": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.ADDR_OPC_MATCH.AD": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.BT_BYPASS": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Number of transactions that bypass the BT (fifo) to HT", "Desc": "BT Bypass", "EvSel": 82, "ExtSel": "", }, "HA.BT_CYCLES_NE": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Cycles the Backup Tracker (BT) is not empty. 
The BT is the actual HOM tracker in IVT.", "Desc": "BT Cycles Not Empty", "EvSel": 66, "ExtSel": "", "Notes": "Will not count case HT is empty and a Bypass happens.", }, "HA.BT_OCCUPANCY": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "BT Occupancy", "EvSel": 67, "ExtSel": "", "MaxIncCyc": 512, }, "HA.BT_OCCUPANCY.WRITES_LOCAL": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "BT Occupancy", "EvSel": 67, "ExtSel": "", "MaxIncCyc": 512, "Umask": "b00010000", }, "HA.BT_OCCUPANCY.READS_LOCAL": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. 
HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "BT Occupancy", "EvSel": 67, "ExtSel": "", "MaxIncCyc": 512, "Umask": "b00000100", }, "HA.BT_OCCUPANCY.READS_REMOTE": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "BT Occupancy", "EvSel": 67, "ExtSel": "", "MaxIncCyc": 512, "Umask": "b00001000", }, "HA.BT_OCCUPANCY.REMOTE": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "BT Occupancy", "EvSel": 67, "ExtSel": "", "MaxIncCyc": 512, "Umask": "b00000010", }, "HA.BT_OCCUPANCY.WRITES_REMOTE": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. 
HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "BT Occupancy", "EvSel": 67, "ExtSel": "", "MaxIncCyc": 512, "Umask": "b00100000", }, "HA.BT_OCCUPANCY.LOCAL": { "Box": "HA", "Category": "HA BT (Backup Tracker) Events", "Counters": "0-3", "Defn": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the \"not empty\" stat to calculate average queue occupancy or the \"allocations\" stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "BT Occupancy", "EvSel": 67, "ExtSel": "", "MaxIncCyc": 512, "Umask": "b00000001", }, "HA.BYPASS_IMC": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", }, "HA.BYPASS_IMC.TAKEN": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", "Umask": "bxxxxxxx1", }, "HA.BYPASS_IMC.NOT_TAKEN": { "Box": "HA", "Category": "HA BYPASS Events", "Counters": "0-3", "Defn": "Counts the number of times when the HA was able to bypass was attempted. 
This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.", "Desc": "HA to iMC Bypass", "EvSel": 20, "ExtSel": "", "Notes": "Only read transactions use iMC bypass", "Umask": "bxxxxxx1x", }, "HA.CLOCKTICKS": { "Box": "HA", "Category": "HA UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.", "Desc": "uclks", "EvSel": 0, "ExtSel": "", }, "HA.CONFLICT_CYCLES": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": "0-3", "Desc": "Conflict Checks", "EvSel": 11, "ExtSel": "", }, "HA.CONFLICT_CYCLES.CONFLICT": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": "0-3", "Desc": "Conflict Checks", "EvSel": 11, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.CONFLICT_CYCLES.CMP_FWDS": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": "0-3", "Desc": "Conflict Checks", "EvSel": 11, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.CONFLICT_CYCLES.ACKCNFLTS": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": "0-3", "Desc": "Conflict Checks", "EvSel": 11, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.CONFLICT_CYCLES.LAST": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": "0-3", "Desc": "Conflict Checks", "EvSel": 11, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.DIRECT2CORE_COUNT": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of Direct2Core messages sent", "Desc": "Direct2Core Messages Sent", "EvSel": 17, "ExtSel": "", "Notes": "Will not be implemented since OUTBOUND_TX_BL:0x1 will count DRS to CORE which is effectively the same thing as D2C count", }, "HA.DIRECT2CORE_CYCLES_DISABLED": { "Box": "HA", "Category": "HA 
DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of cycles in which Direct2Core was disabled", "Desc": "Cycles when Direct2Core was Disabled", "EvSel": 18, "ExtSel": "", }, "HA.DIRECT2CORE_TXN_OVERRIDE": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of Reads where Direct2Core overridden", "Desc": "Number of Reads that had Direct2Core Overridden", "EvSel": 19, "ExtSel": "", }, "HA.DIRECTORY_LAT_OPT": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Directory Latency Optimization Data Return Path Taken. When directory mode is enabled and the directory retuned for a read is Dir=I, then data can be returned using a faster path if certain conditions are met (credits, free pipeline, etc).", "Desc": "Directory Lat Opt Return", "EvSel": 65, }, "HA.DIRECTORY_LOOKUP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "HA.DIRECTORY_LOOKUP.NO_SNP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "HA.DIRECTORY_LOOKUP.SNP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. 
Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "HA.DIRECTORY_UPDATE": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "HA.DIRECTORY_UPDATE.SET": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "HA.DIRECTORY_UPDATE.ANY": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx11", }, "HA.DIRECTORY_UPDATE.CLEAR": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. 
This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "HA.IGR_CREDITS_AD_QPI2": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Accumulates the number of credits available to the QPI Link 2 AD Ingress buffer.", "Desc": "AD QPI Link 2 Credit Accumulator", "EvSel": 89, "ExtSel": "", }, "HA.IGR_CREDITS_BL_QPI2": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Accumulates the number of credits available to the QPI Link 2 BL Ingress buffer.", "Desc": "BL QPI Link 2 Credit Accumulator", "EvSel": 90, "ExtSel": "", }, "HA.IGR_NO_CREDIT_CYCLES": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI1": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI0": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. 
This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI1": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI0": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.IMC_READS": { "Box": "HA", "Category": "HA IMC_READS Events", "Counters": "0-3", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Normal Priority Reads Issued", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 4, "Notes": "Does not count reads using the bypass path. That is counted separately in HA_IMC.BYPASS", }, "HA.IMC_READS.NORMAL": { "Box": "HA", "Category": "HA IMC_READS Events", "Counters": "0-3", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Normal Priority Reads Issued", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 4, "Notes": "Does not count reads using the bypass path. 
That is counted separately in HA_IMC.BYPASS", "Umask": "b00000001", }, "HA.IMC_RETRY": { "Box": "HA", "Category": "HA IMC_MISC Events", "Counters": "0-3", "Desc": "Retry Events", "EvSel": 30, "ExtSel": "", }, "HA.IMC_WRITES": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", }, "HA.IMC_WRITES.PARTIAL_ISOCH": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.IMC_WRITES.ALL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "b00001111", }, "HA.IMC_WRITES.PARTIAL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.IMC_WRITES.FULL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. 
It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.IMC_WRITES.FULL_ISOCH": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.IODC_CONFLICTS": { "Box": "HA", "Category": "HA IODC Events", "Counters": "0-3", "Desc": "IODC Conflicts", "EvSel": 87, "ExtSel": "", }, "HA.IODC_CONFLICTS.ANY": { "Box": "HA", "Category": "HA IODC Events", "Counters": "0-3", "Desc": "IODC Conflicts", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.IODC_CONFLICTS.LAST": { "Box": "HA", "Category": "HA IODC Events", "Counters": "0-3", "Desc": "IODC Conflicts", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.IODC_INSERTS": { "Box": "HA", "Category": "HA IODC Events", "Counters": "0-3", "Defn": "IODC Allocations", "Desc": "IODC Inserts", "EvSel": 86, "ExtSel": "", }, "HA.IODC_OLEN_WBMTOI": { "Box": "HA", "Category": "HA IODC Events", "Counters": "0-3", "Defn": "Num IODC 0 Length Writebacks M to I - All of which are dropped.", "Desc": "Num IODC 0 Length Writes", "EvSel": 88, }, "HA.OSB": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", }, "HA.OSB.READS_LOCAL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. 
Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.OSB.REMOTE": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.OSB.INVITOE_LOCAL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.OSB_EDR": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", }, "HA.OSB_EDR.ALL": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.OSB_EDR.READS_LOCAL_S": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.OSB_EDR.READS_LOCAL_I": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of 
transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.OSB_EDR.READS_REMOTE_S": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.OSB_EDR.READS_REMOTE_I": { "Box": "HA", "Category": "HA OSB (Opportunistic Snoop Broadcast) Events", "Counters": "0-3", "Defn": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return", "Desc": "OSB Early Data Return", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.REQUESTS": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", }, "HA.REQUESTS.WRITES_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.REQUESTS.READS_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.REQUESTS.WRITES_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.REQUESTS.WRITES": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "b00001100", }, "HA.REQUESTS.INVITOE_LOCAL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.REQUESTS.READS": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "b00000011", }, "HA.REQUESTS.INVITOE_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.REQUESTS.READS_REMOTE": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.RING_AD_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_AD_USED.CCW_VR1_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bx1xxxxxx", }, "HA.RING_AD_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b11001100", }, "HA.RING_AD_USED.CW_VR0_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_AD_USED.CW_VR1_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxx1xxxx", }, "HA.RING_AD_USED.CCW_VR0_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_AD_USED.CCW_VR1_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b1xxxxxxx", }, "HA.RING_AD_USED.CCW_VR0_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_AD_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00110011", }, "HA.RING_AD_USED.CW_VR0_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_AD_USED.CW_VR1_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AD Ring in Use", "EvSel": 62, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxx1xxxxx", }, "HA.RING_AK_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_AK_USED.CCW_VR1_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bx1xxxxxx", }, "HA.RING_AK_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b11001100", }, "HA.RING_AK_USED.CW_VR0_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_AK_USED.CW_VR1_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxx1xxxx", }, "HA.RING_AK_USED.CCW_VR0_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_AK_USED.CCW_VR1_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b1xxxxxxx", }, "HA.RING_AK_USED.CCW_VR0_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_AK_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00110011", }, "HA.RING_AK_USED.CW_VR0_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_AK_USED.CW_VR1_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA AK Ring in Use", "EvSel": 63, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxx1xxxxx", }, "HA.RING_BL_USED": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "HA.RING_BL_USED.CCW_VR1_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bx1xxxxxx", }, "HA.RING_BL_USED.CCW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b11001100", }, "HA.RING_BL_USED.CW_VR0_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "HA.RING_BL_USED.CW_VR1_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxx1xxxx", }, "HA.RING_BL_USED.CCW_VR0_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "HA.RING_BL_USED.CCW_VR1_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b1xxxxxxx", }, "HA.RING_BL_USED.CCW_VR0_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "HA.RING_BL_USED.CW": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00110011", }, "HA.RING_BL_USED.CW_VR0_EVEN": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "HA.RING_BL_USED.CW_VR1_ODD": { "Box": "HA", "Category": "HA RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "HA BL Ring in Use", "EvSel": 64, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxx1xxxxx", }, "HA.RPQ_CYCLES_NO_REG_CREDITS": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN1": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. 
This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000010", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN2": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000100", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN3": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. 
This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00001000", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN0": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000001", }, "HA.SNOOP_RESP": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", }, "HA.SNOOP_RESP.RSPI": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNOOP_RESP.RSPIFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.SNOOP_RESP.RSPS": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNOOP_RESP.RSP_WB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.SNOOP_RESP.RSPCNFLCT": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.SNOOP_RESP.RSP_FWD_WB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.SNOOP_RESP.RSPSFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 33, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.SNP_RESP_RECV_LOCAL": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", }, "HA.SNP_RESP_RECV_LOCAL.RSPI": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.SNP_RESP_RECV_LOCAL.OTHER": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "b1xxxxxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPIFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.SNP_RESP_RECV_LOCAL.RSPxFWDxWB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local 
request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxx1xxxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPS": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.SNP_RESP_RECV_LOCAL.RSPxWB": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxx1xxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPCNFLCT": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bx1xxxxxx", }, "HA.SNP_RESP_RECV_LOCAL.RSPSFWD": { "Box": "HA", "Category": "HA SNP_RESP Events", "Counters": "0-3", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 96, "ExtSel": "", "Umask": "bxxxx1xxx", }, "HA.TAD_REQUESTS_G0": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, }, "HA.TAD_REQUESTS_G0.REGION0": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "HA.TAD_REQUESTS_G0.REGION7": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b10000000", }, "HA.TAD_REQUESTS_G0.REGION3": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. 
There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "HA.TAD_REQUESTS_G0.REGION4": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "HA.TAD_REQUESTS_G0.REGION2": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. 
This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "HA.TAD_REQUESTS_G0.REGION1": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "HA.TAD_REQUESTS_G0.REGION5": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00100000", }, "HA.TAD_REQUESTS_G0.REGION6": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b01000000", }, "HA.TAD_REQUESTS_G1": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, }, "HA.TAD_REQUESTS_G1.REGION9": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. 
There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "HA.TAD_REQUESTS_G1.REGION10": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "HA.TAD_REQUESTS_G1.REGION11": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. 
This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "HA.TAD_REQUESTS_G1.REGION8": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "HA.TxR_AD_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", }, "HA.TxR_AD_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_AD_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_AD_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_AK": { "Box": "HA", 
"Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Desc": "Outbound Ring Transactions on AK", "EvSel": 14, "ExtSel": "", }, "HA.TxR_AK_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", }, "HA.TxR_AK_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_AK_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_AK_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_BL": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", }, "HA.TxR_BL.DRS_QPI": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxx1xx", }, "HA.TxR_BL.DRS_CACHE": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_BL.DRS_CORE": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. 
This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_BL_CYCLES_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", }, "HA.TxR_BL_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx1x", }, "HA.TxR_BL_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxx11", }, "HA.TxR_BL_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "ExtSel": "", "Umask": "bxxxxxxx1", }, "HA.TxR_BL_OCCUPANCY": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Occupancy", "Desc": "BL Egress Occupancy", "EvSel": 52, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, }, "HA.TxR_BL_OCCUPANCY.SCHED1": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Occupancy", "Desc": "BL Egress Occupancy", "EvSel": 52, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000010", }, "HA.TxR_BL_OCCUPANCY.SCHED0": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Occupancy", "Desc": "BL Egress Occupancy", "EvSel": 52, "ExtSel": "", "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000001", }, "HA.WPQ_CYCLES_NO_REG_CREDITS": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). 
This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN1": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000010", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN2": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). 
This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000100", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN3": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00001000", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN0": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). 
This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 4, "Umask": "b00000001", }, # iMC: "iMC.ACT_COUNT": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", }, "iMC.ACT_COUNT.BYP": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.ACT_COUNT.RD": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. 
One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.ACT_COUNT.WR": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.BYP_CMDS": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", }, "iMC.BYP_CMDS.PRE": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.BYP_CMDS.CAS": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.BYP_CMDS.ACT": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.CAS_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", }, "iMC.CAS_COUNT.RD_UNDERFILL": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.CAS_COUNT.ALL": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001111", }, "iMC.CAS_COUNT.WR_WMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": 
"0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.CAS_COUNT.WR_RMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.CAS_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00000011", }, "iMC.CAS_COUNT.RD_REG": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.CAS_COUNT.RD_RMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.CAS_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001100", }, "iMC.CAS_COUNT.RD_WMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.DCLOCKTICKS": { "Box": "iMC", "Category": "iMC DCLK Events", "Counters": "0-3", "Desc": "DRAM Clockticks", "EvSel": 0, "ExtSel": "", }, "iMC.DRAM_PRE_ALL": { "Box": "iMC", "Category": "iMC DRAM_PRE_ALL Events", "Counters": "0-3", "Defn": "Counts the number of times that the precharge all command was sent.", "Desc": "DRAM Precharge All Commands", "EvSel": 6, "ExtSel": "", }, "iMC.DRAM_REFRESH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the 
number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", }, "iMC.DRAM_REFRESH.PANIC": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.DRAM_REFRESH.HIGH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.ECC_CORRECTABLE_ERRORS": { "Box": "iMC", "Category": "iMC ECC Events", "Counters": "0-3", "Defn": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit erros in lockstep mode.", "Desc": "ECC Correctable Errors", "EvSel": 9, "ExtSel": "", }, "iMC.MAJOR_MODES": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", }, "iMC.MAJOR_MODES.ISOCH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. 
Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.MAJOR_MODES.READ": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.MAJOR_MODES.PARTIAL": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.MAJOR_MODES.WRITE": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.POWER_CHANNEL_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.", "Desc": "Channel DLLOFF Cycles", "EvSel": 132, "ExtSel": "", "Notes": "IBT = Input Buffer Termination = Off", }, "iMC.POWER_CHANNEL_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. 
If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.", "Desc": "Channel PPD Cycles", "EvSel": 133, "ExtSel": "", "MaxIncCyc": 4, "Notes": "IBT = Input Buffer Termination = On. ALL Ranks must be populated in order to measure", }, "iMC.POWER_CKE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, }, "iMC.POWER_CKE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00100000", }, "iMC.POWER_CKE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b01000000", }, "iMC.POWER_CKE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00001000", }, "iMC.POWER_CKE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00010000", }, "iMC.POWER_CKE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000010", }, "iMC.POWER_CKE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000001", }, "iMC.POWER_CKE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b00000100", }, "iMC.POWER_CKE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "MaxIncCyc": 16, "Umask": "b10000000", }, "iMC.POWER_CRITICAL_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.", "Desc": "Critical Throttle Cycles", "EvSel": 134, "ExtSel": "", }, "iMC.POWER_PCU_THROTTLING": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "EvSel": 66, "ExtSel": "", }, "iMC.POWER_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. 
One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.", "Desc": "Clock-Enabled Self-Refresh", "EvSel": 67, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. 
It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.POWER_THROTTLE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. 
If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.POWER_THROTTLE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.POWER_THROTTLE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.PREEMPTION": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. 
This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", }, "iMC.PREEMPTION.RD_PREEMPT_WR": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PREEMPTION.RD_PREEMPT_RD": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. 
This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PRE_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", }, "iMC.PRE_COUNT.PAGE_CLOSE": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PRE_COUNT.BYP": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.PRE_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.PRE_COUNT.PAGE_MISS": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PRE_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_PRIO": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", }, "iMC.RD_CAS_PRIO.PANIC": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_PRIO.LOW": { "Box": 
"iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_PRIO.MED": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_PRIO.HIGH": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_RANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", }, "iMC.RD_CAS_RANK0.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_RANK0.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.RD_CAS_RANK0.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.RD_CAS_RANK0.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_RANK0.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.RD_CAS_RANK0.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_RANK0.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_RANK0.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.RD_CAS_RANK1": { 
"Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", }, "iMC.RD_CAS_RANK1.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_RANK1.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.RD_CAS_RANK1.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.RD_CAS_RANK1.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_RANK1.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.RD_CAS_RANK1.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_RANK1.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_RANK1.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.RD_CAS_RANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", }, "iMC.RD_CAS_RANK2.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_RANK2.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS 
Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.RD_CAS_RANK2.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.RD_CAS_RANK2.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_RANK2.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.RD_CAS_RANK2.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_RANK2.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_RANK2.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.RD_CAS_RANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", }, "iMC.RD_CAS_RANK3.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_RANK3.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.RD_CAS_RANK3.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.RD_CAS_RANK3.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": 
"bxxxxxxx1", }, "iMC.RD_CAS_RANK3.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.RD_CAS_RANK3.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_RANK3.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_RANK3.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.RD_CAS_RANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", }, "iMC.RD_CAS_RANK4.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_RANK4.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.RD_CAS_RANK4.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.RD_CAS_RANK4.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_RANK4.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.RD_CAS_RANK4.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_RANK4.BANK2": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_RANK4.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.RD_CAS_RANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", }, "iMC.RD_CAS_RANK5.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_RANK5.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.RD_CAS_RANK5.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.RD_CAS_RANK5.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_RANK5.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.RD_CAS_RANK5.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_RANK5.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_RANK5.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.RD_CAS_RANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS 
Access to Rank 6", "EvSel": 182, "ExtSel": "", }, "iMC.RD_CAS_RANK6.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_RANK6.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.RD_CAS_RANK6.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.RD_CAS_RANK6.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_RANK6.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.RD_CAS_RANK6.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_RANK6.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_RANK6.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.RD_CAS_RANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", }, "iMC.RD_CAS_RANK7.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_RANK7.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b1xxxxxxx", }, 
"iMC.RD_CAS_RANK7.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.RD_CAS_RANK7.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_RANK7.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.RD_CAS_RANK7.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_RANK7.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_RANK7.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.RPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. 
This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.", "Desc": "Read Pending Queue Not Empty", "EvSel": 17, "ExtSel": "", }, "iMC.RPQ_INSERTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.", "Desc": "Read Pending Queue Allocations", "EvSel": 16, "ExtSel": "", }, "iMC.VMSE_MXB_WR_OCCUPANCY": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE MXB write buffer occupancy", "EvSel": 145, "ExtSel": "", "MaxIncCyc": 32, "SubCtr": 1, }, "iMC.VMSE_WR_PUSH": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", }, "iMC.VMSE_WR_PUSH.RMM": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.VMSE_WR_PUSH.WMM": { "Box": "iMC", "Category": "iMC VMSE Events", "Counters": "0-3", "Desc": "VMSE WR PUSH issued", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WMM_TO_RMM": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", }, "iMC.WMM_TO_RMM.VMSE_RETRY": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WMM_TO_RMM.STARVE": { "Box": "iMC", "Category": "iMC 
MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WMM_TO_RMM.LOW_THRESH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WPQ_CYCLES_FULL": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.", "Desc": "Write Pending Queue Full Cycles", "EvSel": 34, "ExtSel": "", }, "iMC.WPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. 
Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.", "Desc": "Write Pending Queue Not Empty", "EvSel": 33, "ExtSel": "", }, "iMC.WPQ_INSERTS": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC.", "Desc": "Write Pending Queue Allocations", "EvSel": 32, "ExtSel": "", }, "iMC.WPQ_READ_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 35, "ExtSel": "", }, "iMC.WPQ_WRITE_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. 
Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 36, "ExtSel": "", }, "iMC.WRONG_MM": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Not getting the requested Major Mode", "EvSel": 193, "ExtSel": "", }, "iMC.WR_CAS_RANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", }, "iMC.WR_CAS_RANK0.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WR_CAS_RANK0.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.WR_CAS_RANK0.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.WR_CAS_RANK0.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WR_CAS_RANK0.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.WR_CAS_RANK0.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.WR_CAS_RANK0.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": 
"bxxxxx1xx", }, "iMC.WR_CAS_RANK0.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.WR_CAS_RANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", }, "iMC.WR_CAS_RANK1.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WR_CAS_RANK1.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.WR_CAS_RANK1.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.WR_CAS_RANK1.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WR_CAS_RANK1.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.WR_CAS_RANK1.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.WR_CAS_RANK1.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WR_CAS_RANK1.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.WR_CAS_RANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", }, "iMC.WR_CAS_RANK2.BANK1": { "Box": "iMC", "Category": "iMC CAS 
Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WR_CAS_RANK2.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.WR_CAS_RANK2.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.WR_CAS_RANK2.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WR_CAS_RANK2.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.WR_CAS_RANK2.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.WR_CAS_RANK2.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WR_CAS_RANK2.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.WR_CAS_RANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", }, "iMC.WR_CAS_RANK3.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WR_CAS_RANK3.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.WR_CAS_RANK3.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 
3", "EvSel": 187, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.WR_CAS_RANK3.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WR_CAS_RANK3.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.WR_CAS_RANK3.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.WR_CAS_RANK3.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WR_CAS_RANK3.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.WR_CAS_RANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", }, "iMC.WR_CAS_RANK4.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WR_CAS_RANK4.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.WR_CAS_RANK4.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.WR_CAS_RANK4.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WR_CAS_RANK4.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "bxx1xxxxx", }, 
"iMC.WR_CAS_RANK4.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.WR_CAS_RANK4.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WR_CAS_RANK4.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.WR_CAS_RANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", }, "iMC.WR_CAS_RANK5.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WR_CAS_RANK5.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.WR_CAS_RANK5.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.WR_CAS_RANK5.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WR_CAS_RANK5.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.WR_CAS_RANK5.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.WR_CAS_RANK5.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WR_CAS_RANK5.BANK6": { "Box": "iMC", "Category": "iMC 
CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.WR_CAS_RANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", }, "iMC.WR_CAS_RANK6.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WR_CAS_RANK6.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.WR_CAS_RANK6.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.WR_CAS_RANK6.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WR_CAS_RANK6.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.WR_CAS_RANK6.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.WR_CAS_RANK6.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WR_CAS_RANK6.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.WR_CAS_RANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", }, "iMC.WR_CAS_RANK7.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, 
"ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WR_CAS_RANK7.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.WR_CAS_RANK7.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.WR_CAS_RANK7.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WR_CAS_RANK7.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.WR_CAS_RANK7.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.WR_CAS_RANK7.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WR_CAS_RANK7.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "bx1xxxxxx", }, # R2PCIe: "R2PCIe.CLOCKTICKS": { "Box": "R2PCIe", "Category": "R2PCIe UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Number of uclks in domain", "EvSel": 1, "ExtSel": "", }, "R2PCIe.RING_AD_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_AD_USED.CCW_VR1_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bx1xxxxxx", }, "R2PCIe.RING_AD_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b11001100", }, "R2PCIe.RING_AD_USED.CW_VR0_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AD_USED.CW_VR1_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxx1xxxx", }, "R2PCIe.RING_AD_USED.CCW_VR0_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_AD_USED.CCW_VR1_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b1xxxxxxx", }, "R2PCIe.RING_AD_USED.CCW_VR0_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_AD_USED.CW_VR0_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AD_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00110011", }, "R2PCIe.RING_AD_USED.CW_VR1_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxx1xxxxx", }, "R2PCIe.RING_AK_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_AK_USED.CCW_VR1_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bx1xxxxxx", }, "R2PCIe.RING_AK_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b11001100", }, "R2PCIe.RING_AK_USED.CW_VR0_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AK_USED.CW_VR1_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxx1xxxx", }, "R2PCIe.RING_AK_USED.CCW_VR0_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_AK_USED.CCW_VR1_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b1xxxxxxx", }, "R2PCIe.RING_AK_USED.CCW_VR0_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_AK_USED.CW_VR0_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AK_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00110011", }, "R2PCIe.RING_AK_USED.CW_VR1_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxx1xxxxx", }, "R2PCIe.RING_BL_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", }, "R2PCIe.RING_BL_USED.CCW_VR1_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bx1xxxxxx", }, "R2PCIe.RING_BL_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b11001100", }, "R2PCIe.RING_BL_USED.CW_VR0_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxx1x", }, "R2PCIe.RING_BL_USED.CW_VR1_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxx1xxxx", }, "R2PCIe.RING_BL_USED.CCW_VR0_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxx1xxx", }, "R2PCIe.RING_BL_USED.CCW_VR1_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b1xxxxxxx", }, "R2PCIe.RING_BL_USED.CCW_VR0_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxx1xx", }, "R2PCIe.RING_BL_USED.CW_VR0_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxxxxxxx1", }, "R2PCIe.RING_BL_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "b00110011", }, "R2PCIe.RING_BL_USED.CW_VR1_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "ExtSel": "", "Notes": "On a 2 column IVT (e.g. 10C) CW_EVEN is actually CW_VR0_EVEN+CW_VR1_EVEN (similarly for CCW/ODD). In any cycle, a ring stop can see up to one packet moving in the CW direction and one packet moving in the CCW direction.", "Umask": "bxx1xxxxx", }, "R2PCIe.RING_IV_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", }, "R2PCIe.RING_IV_USED.ANY": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Umask": "b11111111", }, "R2PCIe.RING_IV_USED.CCW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Umask": "b11001100", }, "R2PCIe.RING_IV_USED.CW": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. 
The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "ExtSel": "", "Umask": "b00110011", }, "R2PCIe.RxR_AK_BOUNCES": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", }, "R2PCIe.RxR_AK_BOUNCES.CCW": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.RxR_AK_BOUNCES.CW": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.RxR_CYCLES_NE": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", }, "R2PCIe.RxR_CYCLES_NE.NCS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.RxR_CYCLES_NE.NCB": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.RxR_INSERTS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", }, "R2PCIe.RxR_INSERTS.NCS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxx1xxxxx", }, "R2PCIe.RxR_INSERTS.NCB": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. 
This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxx1xxxx", }, "R2PCIe.RxR_OCCUPANCY": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, }, "R2PCIe.RxR_OCCUPANCY.DRS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "ExtSel": "", "MaxIncCyc": 24, "SubCtr": 1, "Umask": "b00001000", }, "R2PCIe.TxR_CYCLES_FULL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", }, "R2PCIe.TxR_CYCLES_FULL.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_CYCLES_FULL.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_CYCLES_FULL.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_CYCLES_NE": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", }, "R2PCIe.TxR_CYCLES_NE.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_CYCLES_NE.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_CYCLES_NE.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_NACK_CCW": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 40, "ExtSel": "", }, "R2PCIe.TxR_NACK_CCW.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_NACK_CCW.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_NACK_CCW.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CCW NACK", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_NACK_CW": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CW NACK", "EvSel": 38, "ExtSel": "", }, "R2PCIe.TxR_NACK_CW.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_NACK_CW.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_NACK_CW.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": "0-1", "Desc": "Egress CW NACK", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxxx1", }, # PCU: "PCU.CLOCKTICKS": { "Box": "PCU", "Category": "PCU PCLK Events", "Counters": "0-3", "Defn": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. 
The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.", "Desc": "pclk Cycles", "EvSel": 0, "ExtSel": "", }, "PCU.CORE0_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 0 C State Transition Cycles", "EvSel": 112, "ExtSel": "", "Notes": "This only tracks the hardware portion in the RCFSM (CFCFSM). This portion is just doing the core C state transition. It does not include any necessary frequency/voltage transitions.", }, "PCU.CORE10_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 10 C State Transition Cycles", "EvSel": 122, "ExtSel": "", }, "PCU.CORE11_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 11 C State Transition Cycles", "EvSel": 123, "ExtSel": "", }, "PCU.CORE12_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 12 C State Transition Cycles", "EvSel": 124, "ExtSel": "", }, "PCU.CORE13_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core 13 C State Transition Cycles", "EvSel": 125, "ExtSel": "", }, "PCU.CORE14_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 14 C State Transition Cycles", "EvSel": 126, "ExtSel": "", }, "PCU.CORE1_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 1 C State Transition Cycles", "EvSel": 113, "ExtSel": "", }, "PCU.CORE2_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 2 C State Transition Cycles", "EvSel": 114, "ExtSel": "", }, "PCU.CORE3_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 3 C State Transition Cycles", "EvSel": 115, "ExtSel": "", }, "PCU.CORE4_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 4 C State Transition Cycles", "EvSel": 116, "ExtSel": "", }, "PCU.CORE5_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core 5 C State Transition Cycles", "EvSel": 117, "ExtSel": "", }, "PCU.CORE6_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 6 C State Transition Cycles", "EvSel": 118, "ExtSel": "", }, "PCU.CORE7_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 7 C State Transition Cycles", "EvSel": 119, "ExtSel": "", }, "PCU.CORE8_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 8 C State Transition Cycles", "EvSel": 120, "ExtSel": "", "Notes": "This only tracks the hardware portion in the RCFSM (CFCFSM). This portion is just doing the core C state transition. It does not include any necessary frequency/voltage transitions.", }, "PCU.CORE9_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core 9 C State Transition Cycles", "EvSel": 121, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE0": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. 
In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 0", "EvSel": 23, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE1": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 1", "EvSel": 24, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE10": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 10", "EvSel": 33, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE11": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 11", "EvSel": 34, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE12": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. 
In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 12", "EvSel": 35, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE13": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 13", "EvSel": 36, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE14": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 14", "EvSel": 37, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE2": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 2", "EvSel": 25, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE3": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. 
In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 3", "EvSel": 26, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE4": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 4", "EvSel": 27, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE5": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 5", "EvSel": 28, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE6": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 6", "EvSel": 29, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE7": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. 
In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 7", "EvSel": 30, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE8": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 8", "EvSel": 31, "ExtSel": "", }, "PCU.DELAYED_C_STATE_ABORT_CORE9": { "Box": "PCU", "Category": "PCU Delayed C-State Events", "Counters": "0-3", "Defn": "Number of times that a deep C state was requested, but the delayed C state algorithm \"rejected\" the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.", "Desc": "Deep C State Rejection - Core 9", "EvSel": 32, "ExtSel": "", }, "PCU.DEMOTIONS_CORE0": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 0 C State Demotions", "EvSel": 30, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE1": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 1 C State Demotions", "EvSel": 31, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE10": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 10 C State Demotions", "EvSel": 66, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE11": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION 
Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 11 C State Demotions", "EvSel": 67, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE12": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 12 C State Demotions", "EvSel": 68, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE13": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 13 C State Demotions", "EvSel": 69, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE14": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 14 C State Demotions", "EvSel": 70, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE2": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 2 C State Demotions", "EvSel": 32, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE3": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 3 C State Demotions", "EvSel": 33, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE4": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 4 C State Demotions", "EvSel": 34, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE5": { "Box": "PCU", 
"Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 5 C State Demotions", "EvSel": 35, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE6": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 6 C State Demotions", "EvSel": 36, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE7": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 7 C State Demotions", "EvSel": 37, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE8": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 8 C State Demotions", "EvSel": 64, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.DEMOTIONS_CORE9": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core 9 C State Demotions", "EvSel": 65, "Filter": "PCUFilter[7:0]", "ExtSel": "", }, "PCU.FREQ_BAND0_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. 
One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 11, "Filter": "PCUFilter[7:0]", "ExtSel": "", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", }, "PCU.FREQ_BAND1_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 12, "Filter": "PCUFilter[15:8]", "ExtSel": "", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. 
It is handled on fast path.", }, "PCU.FREQ_BAND2_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 13, "Filter": "PCUFilter[23:16]", "ExtSel": "", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", }, "PCU.FREQ_BAND3_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. 
One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 14, "Filter": "PCUFilter[31:24]", "ExtSel": "", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", }, "PCU.FREQ_MAX_CURRENT_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when current is the upper limit on frequency.", "Desc": "Current Strongest Upper Limit Cycles", "EvSel": 7, "ExtSel": "", "Notes": "This is fast path, will clear our other limits when it happens. The slow loop portion, which covers the other limits, can double count EDP. Clearing should fix this up in the next fast path event, but this will happen. Add up all the cycles and it won't make sense, but the general distribution is true.", }, "PCU.FREQ_MAX_LIMIT_THERMAL_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. 
This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.", "Desc": "Thermal Strongest Upper Limit Cycles", "EvSel": 4, "ExtSel": "", }, "PCU.FREQ_MAX_OS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the OS is the upper limit on frequency.", "Desc": "OS Strongest Upper Limit Cycles", "EvSel": 6, "ExtSel": "", "Notes": "Essentially, this event says the OS is getting the frequency it requested.", }, "PCU.FREQ_MAX_POWER_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when power is the upper limit on frequency.", "Desc": "Power Strongest Upper Limit Cycles", "EvSel": 5, "ExtSel": "", }, "PCU.FREQ_MIN_IO_P_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MIN_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.", "Desc": "IO P Limit Strongest Lower Limit Cycles", "EvSel": 97, "ExtSel": "", }, "PCU.FREQ_MIN_PERF_P_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MIN_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when Perf P Limit is preventing us from dropping the frequency lower. Perf P Limit is an algorithm that takes input from remote sockets when determining if a socket should drop it's frequency down. 
This is largely to minimize increases in snoop and remote read latencies.", "Desc": "Perf P Limit Strongest Lower Limit Cycles", "EvSel": 2, "ExtSel": "", }, "PCU.FREQ_TRANS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.", "Desc": "Cycles spent changing Frequency", "EvSel": 96, "ExtSel": "", }, "PCU.MEMORY_PHASE_SHEDDING_CYCLES": { "Box": "PCU", "Category": "PCU MEMORY_PHASE_SHEDDING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.", "Desc": "Memory Phase Shedding Cycles", "EvSel": 47, "ExtSel": "", "Notes": "Package C1", }, "PCU.PKG_C_EXIT_LATENCY": { "Box": "PCU", "Category": "PCU PKG_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the package is transitioning from package C2 to C3.", "Desc": "Package C State Exit Latency", "EvSel": 38, "ExtSel": "", "Notes": "This can be used in conjunction with edge detect to count C3 entrances.", }, "PCU.POWER_STATE_OCCUPANCY": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, }, "PCU.POWER_STATE_OCCUPANCY.CORES_C3": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b10000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C0": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b01000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C6": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. 
It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C-State", "EvSel": 128, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b11000000", }, "PCU.PROCHOT_EXTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.", "Desc": "External Prochot", "EvSel": 10, "ExtSel": "", }, "PCU.PROCHOT_INTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in Interal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.", "Desc": "Internal Prochot", "EvSel": 9, "ExtSel": "", }, "PCU.TOTAL_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions across all cores.", "Desc": "Total Core C State Transition Cycles", "EvSel": 99, "ExtSel": "", }, "PCU.VOLT_TRANS_CYCLES_CHANGE": { "Box": "PCU", "Category": "PCU VOLT_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is changing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition. 
This event is calculated by or'ing together the increasing and decreasing events.", "Desc": "Cycles Changing Voltage", "EvSel": 3, "ExtSel": "", }, "PCU.VOLT_TRANS_CYCLES_DECREASE": { "Box": "PCU", "Category": "PCU VOLT_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is decreasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.", "Desc": "Cycles Decreasing Voltage", "EvSel": 2, "ExtSel": "", }, "PCU.VOLT_TRANS_CYCLES_INCREASE": { "Box": "PCU", "Category": "PCU VOLT_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is increasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.", "Desc": "Cycles Increasing Voltage", "EvSel": 1, "ExtSel": "", }, "PCU.VR_HOT_CYCLES": { "Box": "PCU", "Category": "PCU VR_HOT Events", "Counters": "0-3", "Desc": "VR Hot", "EvSel": 50, "ExtSel": "", }, # QPI_LL: "QPI_LL.CLOCKTICKS": { "Box": "QPI_LL", "Category": "QPI_LL CFCLK Events", "Counters": "0-3", "Defn": "Counts the number of clocks in the QPI LL. This clock runs at 1/8th the \"GT/s\" speed of the QPI link. For example, a 8GT/s link will have qfclk or 1GHz. JKT does not support dynamic link speeds, so this frequency is fixed.", "Desc": "Number of qfclks", "EvSel": 20, "ExtSel": "", }, "QPI_LL.CTO_COUNT": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Counters": "0-3", "Defn": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. 
You can use edge detect to count the number of cases when both events triggered.", "Desc": "Count of CTO Events", "EvSel": 56, "Filter": "QPIMask0[17:0],QPIMatch0[17:0],QPIMask1[19:16],QPIMatch1[19:16]", "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, }, "QPI_LL.DIRECT2CORE": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", }, "QPI_LL.DIRECT2CORE.FAILURE_RBT_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bx1xxxxxx", }, "QPI_LL.DIRECT2CORE.FAILURE_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "QPI_LL.DIRECT2CORE.SUCCESS_RBT_HIT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_RBT_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "b1xxxxxxx", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_RBT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "QPI_LL.DIRECT2CORE.FAILURE_RBT_HIT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxx1xx", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_MISS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "QPI_LL.L1_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.", "Desc": "Cycles in L1", "EvSel": 18, "ExtSel": "", }, "QPI_LL.RxL0P_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p", "EvSel": 16, "ExtSel": "", "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "QPI_LL.RxL0_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. 
Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0", "EvSel": 15, "ExtSel": "", "Notes": "Includes L0p cycles. To get just L0, subtract RxL0P_POWER_CYCLES", }, "QPI_LL.RxL_BYPASSED": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "Rx Flit Buffer Bypassed", "EvSel": 9, "ExtSel": "", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NCS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NCB": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.SNP": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.HOM": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.DRS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NDR": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxx1xxxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.NCS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.NCB": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_CREDITS_CONSUMED_VN1.NDR": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 57, "ExtSel": "", "MaxIncCyc": 2, "Umask": "bxx1xxxxx", }, "QPI_LL.RxL_CREDITS_CONSUMED_VNA": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VNA Credit Consumed", "EvSel": 29, "ExtSel": "", }, "QPI_LL.RxL_CYCLES_NE": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.", "Desc": "RxQ Cycles Not Empty", "EvSel": 10, "ExtSel": "", }, "QPI_LL.RxL_FLITS_G0": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Received - Group 0", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_FLITS_G0.NON_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Received - Group 0", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.RxL_FLITS_G0.DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). 
In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Received - Group 0", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.RxL_FLITS_G0.IDLE": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Received - Group 0", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.RxL_FLITS_G1": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_FLITS_G1.DRS_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.RxL_FLITS_G1.HOM_NONREQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.RxL_FLITS_G1.HOM_REQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.RxL_FLITS_G1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00011000", }, "QPI_LL.RxL_FLITS_G1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. 
Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000110", }, "QPI_LL.RxL_FLITS_G1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.RxL_FLITS_G1.DRS_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. 
This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.RxL_FLITS_G2": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.RxL_FLITS_G2.NCS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.RxL_FLITS_G2.NCB": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001100", }, "QPI_LL.RxL_FLITS_G2.NDR_AD": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.RxL_FLITS_G2.NCB_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.RxL_FLITS_G2.NDR_AK": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.RxL_FLITS_G2.NCB_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.RxL_INSERTS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "Rx Flit Buffer Allocations", "EvSel": 8, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_DRS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.", "Desc": "Rx Flit Buffer Allocations - DRS", "EvSel": 9, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_DRS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.", "Desc": "Rx Flit Buffer Allocations - DRS", "EvSel": 9, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_DRS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.", "Desc": "Rx Flit Buffer Allocations - DRS", "EvSel": 9, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_HOM": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.", "Desc": "Rx Flit Buffer Allocations - HOM", "EvSel": 12, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_HOM.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.", "Desc": "Rx Flit Buffer Allocations - HOM", "EvSel": 12, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_HOM.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.", "Desc": "Rx Flit Buffer Allocations - HOM", "EvSel": 12, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NCB": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.", "Desc": "Rx Flit Buffer Allocations - NCB", "EvSel": 10, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_NCB.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.", "Desc": "Rx Flit Buffer Allocations - NCB", "EvSel": 10, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_NCB.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.", "Desc": "Rx Flit Buffer Allocations - NCB", "EvSel": 10, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NCS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.", "Desc": "Rx Flit Buffer Allocations - NCS", "EvSel": 11, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_NCS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.", "Desc": "Rx Flit Buffer Allocations - NCS", "EvSel": 11, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_NCS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.", "Desc": "Rx Flit Buffer Allocations - NCS", "EvSel": 11, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_NDR": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.", "Desc": "Rx Flit Buffer Allocations - NDR", "EvSel": 14, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_NDR.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.", "Desc": "Rx Flit Buffer Allocations - NDR", "EvSel": 14, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_NDR.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.", "Desc": "Rx Flit Buffer Allocations - NDR", "EvSel": 14, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_INSERTS_SNP": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.", "Desc": "Rx Flit Buffer Allocations - SNP", "EvSel": 13, "ExtSel": "", }, "QPI_LL.RxL_INSERTS_SNP.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.", "Desc": "Rx Flit Buffer Allocations - SNP", "EvSel": 13, "ExtSel": "", "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_INSERTS_SNP.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.", "Desc": "Rx Flit Buffer Allocations - SNP", "EvSel": 13, "ExtSel": "", "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 11, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_DRS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.", "Desc": "RxQ Occupancy - DRS", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_DRS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors DRS flits only.", "Desc": "RxQ Occupancy - DRS", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_DRS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.", "Desc": "RxQ Occupancy - DRS", "EvSel": 21, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_HOM": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.", "Desc": "RxQ Occupancy - HOM", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_HOM.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.", "Desc": "RxQ Occupancy - HOM", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_HOM.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.", "Desc": "RxQ Occupancy - HOM", "EvSel": 24, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_NCB": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors NCB flits only.", "Desc": "RxQ Occupancy - NCB", "EvSel": 22, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_NCB.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.", "Desc": "RxQ Occupancy - NCB", "EvSel": 22, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_NCB.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.", "Desc": "RxQ Occupancy - NCB", "EvSel": 22, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_NCS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.", "Desc": "RxQ Occupancy - NCS", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_NCS.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.", "Desc": "RxQ Occupancy - NCS", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_NCS.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors NCS flits only.", "Desc": "RxQ Occupancy - NCS", "EvSel": 23, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_NDR": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.", "Desc": "RxQ Occupancy - NDR", "EvSel": 26, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_NDR.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.", "Desc": "RxQ Occupancy - NDR", "EvSel": 26, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_NDR.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.", "Desc": "RxQ Occupancy - NDR", "EvSel": 26, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_OCCUPANCY_SNP": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.", "Desc": "RxQ Occupancy - SNP", "EvSel": 25, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_SNP.VN0": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. 
This monitors SNP flits only.", "Desc": "RxQ Occupancy - SNP", "EvSel": 25, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_OCCUPANCY_SNP.VN1": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.", "Desc": "RxQ Occupancy - SNP", "EvSel": 25, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "QPI_LL.TxL0P_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p", "EvSel": 13, "ExtSel": "", "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "QPI_LL.TxL0_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. 
Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0", "EvSel": 12, "ExtSel": "", "Notes": "Includes L0p cycles. To get just L0, subtract TxL0P_POWER_CYCLES", }, "QPI_LL.TxL_BYPASSED": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.", "Desc": "Tx Flit Buffer Bypassed", "EvSel": 5, "ExtSel": "", }, "QPI_LL.TxL_CYCLES_NE": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.", "Desc": "Tx Flit Buffer Cycles not Empty", "EvSel": 6, "ExtSel": "", }, "QPI_LL.TxL_FLITS_G0": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. 
When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.TxL_FLITS_G0.NON_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.TxL_FLITS_G0.DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.TxL_FLITS_G1": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.TxL_FLITS_G1.DRS_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.TxL_FLITS_G1.HOM_NONREQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.TxL_FLITS_G1.HOM_REQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.TxL_FLITS_G1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00011000", }, "QPI_LL.TxL_FLITS_G1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000110", }, "QPI_LL.TxL_FLITS_G1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.TxL_FLITS_G1.DRS_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.TxL_FLITS_G2": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, }, "QPI_LL.TxL_FLITS_G2.NCS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00010000", }, "QPI_LL.TxL_FLITS_G2.NCB": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001100", }, "QPI_LL.TxL_FLITS_G2.NDR_AD": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000001", }, "QPI_LL.TxL_FLITS_G2.NCB_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00001000", }, "QPI_LL.TxL_FLITS_G2.NDR_AK": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000010", }, "QPI_LL.TxL_FLITS_G2.NCB_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "ExtSel": "", "MaxIncCyc": 2, "Umask": "b00000100", }, "QPI_LL.TxL_INSERTS": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "Tx Flit Buffer Allocations", "EvSel": 4, "ExtSel": "", }, "QPI_LL.TxL_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. 
This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.", "Desc": "Tx Flit Buffer Occupancy", "EvSel": 7, "ExtSel": "", }, "QPI_LL.TxR_AD_HOM_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - HOM", "EvSel": 38, "ExtSel": "", }, "QPI_LL.TxR_AD_HOM_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - HOM", "EvSel": 38, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_AD_HOM_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - HOM", "EvSel": 38, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_AD_HOM_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. 
Flow Control FIFO for HOM messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD HOM", "EvSel": 34, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, }, "QPI_LL.TxR_AD_HOM_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD HOM", "EvSel": 34, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_AD_HOM_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD HOM", "EvSel": 34, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_AD_NDR_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 40, "ExtSel": "", }, "QPI_LL.TxR_AD_NDR_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. 
Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 40, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_AD_NDR_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 40, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_AD_NDR_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 36, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, }, "QPI_LL.TxR_AD_NDR_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 36, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_AD_NDR_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. 
Flow Control FIFO for NDR messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD NDR", "EvSel": 36, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_AD_SNP_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - SNP", "EvSel": 39, "ExtSel": "", }, "QPI_LL.TxR_AD_SNP_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - SNP", "EvSel": 39, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_AD_SNP_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - SNP", "EvSel": 39, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_AD_SNP_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO fro Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD SNP", "EvSel": 35, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, }, "QPI_LL.TxR_AD_SNP_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. 
Flow Control FIFO fro Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD SNP", "EvSel": 35, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_AD_SNP_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO fro Snoop messages on AD.", "Desc": "R3QPI Egress Credit Occupancy - AD SNP", "EvSel": 35, "ExtSel": "", "MaxIncCyc": 28, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_AK_NDR_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.", "Desc": "R3QPI Egress Credit Occupancy - AK NDR", "EvSel": 41, "ExtSel": "", }, "QPI_LL.TxR_AK_NDR_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. Local NDR message class to AK Egress.", "Desc": "R3QPI Egress Credit Occupancy - AK NDR", "EvSel": 37, "ExtSel": "", "MaxIncCyc": 6, "SubCtr": 1, }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. 
DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", "Umask": "b00000100", }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_BL_DRS_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - DRS", "EvSel": 42, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000100", }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. 
DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_BL_DRS_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL DRS", "EvSel": 31, "ExtSel": "", "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCB_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCB", "EvSel": 43, "ExtSel": "", }, "QPI_LL.TxR_BL_NCB_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCB", "EvSel": 43, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCB_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCB", "EvSel": 43, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCB_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. 
NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCB", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, }, "QPI_LL.TxR_BL_NCB_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCB", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCB_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCB", "EvSel": 32, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCS_CREDIT_ACQUIRED": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCS", "EvSel": 44, "ExtSel": "", }, "QPI_LL.TxR_BL_NCS_CREDIT_ACQUIRED.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCS", "EvSel": 44, "ExtSel": "", "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCS_CREDIT_ACQUIRED.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. 
NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - NCS", "EvSel": 44, "ExtSel": "", "Umask": "b00000010", }, "QPI_LL.TxR_BL_NCS_CREDIT_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCS", "EvSel": 33, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, }, "QPI_LL.TxR_BL_NCS_CREDIT_OCCUPANCY.VN0": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCS", "EvSel": 33, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000001", }, "QPI_LL.TxR_BL_NCS_CREDIT_OCCUPANCY.VN1": { "Box": "QPI_LL", "Category": "QPI_LL R3QPI_EGRESS_CREDITS Events", "Counters": "0-3", "Defn": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. 
NCS message class to BL Egress.", "Desc": "R3QPI Egress Credit Occupancy - BL NCS", "EvSel": 33, "ExtSel": "", "MaxIncCyc": 2, "SubCtr": 1, "Umask": "b00000010", }, "QPI_LL.VNA_CREDIT_RETURNS": { "Box": "QPI_LL", "Category": "QPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "Defn": "Number of VNA credits returned.", "Desc": "VNA Credits Returned", "EvSel": 28, "ExtSel": "", }, "QPI_LL.VNA_CREDIT_RETURN_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "Defn": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.", "Desc": "VNA Credits Pending Return - Occupancy", "EvSel": 27, "ExtSel": "", "MaxIncCyc": 128, "SubCtr": 1, }, # UBOX: "UBOX.EVENT_MSG": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", }, "UBOX.EVENT_MSG.DOORBELL_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UBOX.EVENT_MSG.IPI_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UBOX.EVENT_MSG.INT_PRIO": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. 
Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UBOX.EVENT_MSG.VLW_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.EVENT_MSG.MSI_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UBOX.LOCK_CYCLES": { "Box": "UBOX", "Category": "UBOX LOCK Events", "Counters": "0-1", "Defn": "Number of times an IDI Lock/SplitLock sequence was started", "Desc": "IDI Lock/SplitLock Cycles", "EvSel": 68, "ExtSel": "", }, "UBOX.PHOLD_CYCLES": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles. Filter from source CoreID.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", }, "UBOX.PHOLD_CYCLES.ASSERT_TO_ACK": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles. 
Filter from source CoreID.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.RACU_REQUESTS": { "Box": "UBOX", "Category": "UBOX RACU Events", "Counters": "0-1", "Desc": "RACU Request", "EvSel": 70, "ExtSel": "", "Notes": "This will be dropped because PHOLD is not implemented this way", }, } derived = { # R3QPI: # HA: "HA.PCT_CYCLES_BL_FULL": { "Box": "HA", "Category": "HA EGRESS Events", "Defn": "Percentage of time the BL Egress Queue is full", "Desc": "Percent BL Egress Full", "Equation": "TxR_BL_CYCLES_FULL.ALL / SAMPLE_INTERVAL", "Obscure": 1, }, "HA.PCT_CYCLES_D2C_DISABLED": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Defn": "Percentage of time that Direct2Core was disabled.", "Desc": "Percent D2C Disabled", "Equation": "DIRECT2CORE_CYCLES_DISABLED / SAMPLE_INTERVAL", "Obscure": 1, }, "HA.PCT_RD_REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Defn": "Percentage of HA traffic that is from Read Requests", "Desc": "Percent Read Requests", "Equation": "REQUESTS.READS / (REQUESTS.READS + REQUESTS.WRITES)", }, "HA.PCT_WR_REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Defn": "Percentage of HA traffic that is from Write Requests", "Desc": "Percent Write Requests", "Equation": "REQUESTS.WRITES / (REQUESTS.READS + REQUESTS.WRITES)", }, # iMC: "iMC.MEM_BW_READS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by reads. Expressed in bytes.", "Desc": "Read Memory Bandwidth", "Equation": "(CAS_COUNT.RD * 64)", }, "iMC.MEM_BW_TOTAL": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Total memory bandwidth. 
Expressed in bytes.", "Desc": "Total Memory Bandwidth", "Equation": "MEM_BW_READS + MEM_BW_WRITES", }, "iMC.MEM_BW_WRITES": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by writes Expressed in bytes.", "Desc": "Write Memory Bandwidth", "Equation": "(CAS_COUNT.WR * 64)", }, "iMC.PCT_CYCLES_CRITICAL_THROTTLE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in critical thermal throttling", "Desc": "Percent Cycles Critical Throttle", "Equation": "POWER_CRITICAL_THROTTLE_CYCLES / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in CKE slow (DLOFF) mode", "Desc": "Percent Cycles DLOFF", "Equation": "POWER_CHANNEL_DLLOFF / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_CKE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in CKE ON mode.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_CKE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_THR": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in thermal throttling.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_THROTTLE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in PPD mode", "Desc": "Percent Cycles PPD", "Equation": "POWER_CHANNEL_PPD / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles Memory is in self refresh power mode", "Desc": "Percent Cycles Self Refresh", "Equation": "POWER_SELF_REFRESH / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_RD_REQUESTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Defn": "Percentage of read requests from total 
requests.", "Desc": "Percent Read Requests", "Equation": "RPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)", }, "iMC.PCT_REQUESTS_PAGE_EMPTY": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Empty", "Desc": "Percent Requests Page Empty", "Equation": "(ACT_COUNT - PRE_COUNT.PAGE_MISS)/ (CAS_COUNT.RD + CAS_COUNT.WR)", }, "iMC.PCT_REQUESTS_PAGE_HIT": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Hits", "Desc": "Percent Requests Page Hit", "Equation": "1 - (PCT_REQUESTS_PAGE_EMPTY + PCT_REQUESTS_PAGE_MISS)", }, "iMC.PCT_REQUESTS_PAGE_MISS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Misses", "Desc": "Percent Requests Page Miss", "Equation": "PRE_COUNT.PAGE_MISS / (CAS_COUNT.RD + CAS_COUNT.WR)", }, "iMC.PCT_WR_REQUESTS": { "Box": "iMC", "Category": "iMC WPQ Events", "Defn": "Percentage of write requests from total requests.", "Desc": "Percent Write Requests", "Equation": "WPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)", }, # R2PCIe: "R2PCIe.CYC_USED_DN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used Down and Even", "Equation": "RING_BL_USED.CCW / SAMPLE_INTERVAL", "Obscure": 1, }, "R2PCIe.CYC_USED_UP": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.CW / SAMPLE_INTERVAL", "Obscure": 1, }, "R2PCIe.RING_THRU_DN_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", "Desc": "Ring Throughput Down and Even", "Equation": "RING_BL_USED.CCW* 32", "Obscure": 1, }, "R2PCIe.RING_THRU_UP_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": 
"Ring Throughput Up and Even", "Equation": "RING_BL_USED.CW * 32", "Obscure": 1, }, # QPI_LL: "QPI_LL.DATA_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "Data received from QPI in bytes ( = DRS + NCB Data messages received from QPI)", "Desc": "Data From QPI", "Equation": "DRS_DATA_MSGS_FROM_QPI + NCB_DATA_MSGS_FROM_QPI", }, "QPI_LL.DATA_FROM_QPI_TO_HA_OR_IIO": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Defn": "Data received from QPI forwarded to HA or IIO. Expressed in Bytes", "Desc": "Data From QPI To HA or IIO", "Equation": "DATA_FROM_QPI - DATA_FROM_QPI_TO_LLC", }, "QPI_LL.DATA_FROM_QPI_TO_LLC": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Defn": "Data received from QPI forwarded to LLC. Expressed in Bytes", "Desc": "Data From QPI To LLC", "Equation": "DIRECT2CORE.SUCCESS_RBT_HIT * 64", }, "QPI_LL.DATA_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "Data packets received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "Data From QPI To Node x", "Equation": "DRS_DataC_FROM_QPI_TO_NODEx + DRS_WRITE_FROM_QPI_TO_NODEx + NCB_DATA_FROM_QPI_TO_NODEx", }, "QPI_LL.DRS_DATA_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Defn": "DRS Data Messges From QPI in bytes", "Desc": "DRS Data Messges From QPI", "Equation": "(RxL_FLITS_G1.DRS_DATA * 8)", }, "QPI_LL.DRS_DataC_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS DataC packets received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "DRS DataC From QPI To Node x", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0{[12:0],dnid}={0x1C00,x}, Q_Py_PCI_PMON_PKT_z_MASK0[17:0]=0x3FF80}) * 64", "Filter": "QPIMask0[17:0],QPIMatch0[17:0]", }, "QPI_LL.DRS_DataC_M_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS DataC_F packets received from QPI. 
Expressed in bytes", "Desc": "DRS DataC_Fs From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) * 64", "Filter": "QPIMask0[17:0],QPIMatch0[17:0],QPIMask1[19:16],QPIMatch1[19:16]", }, "QPI_LL.DRS_FULL_CACHELINE_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Full Cacheline Data Messges From QPI in bytes", "Desc": "DRS Full Cacheline Data Messges From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00,Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1F00} * 64)", "Filter": "QPIMask0[12:0],QPIMatch0[12:0]", }, "QPI_LL.DRS_F_OR_E_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS response in F or E states received from QPI in bytes. To calculate the total data response for each cache line state, it's necessary to add the contribution from three flavors {DataC, DataC_FrcAckCnflt, DataC_Cmp} of data response packets for each cache line state.", "Desc": "DRS Data in F or E From QPI", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C40, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C40, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C20, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x4, 
Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C20, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF })) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0],QPIMask1[19:16],QPIMatch1[19:16]", }, "QPI_LL.DRS_M_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS response in M state received from QPI in bytes", "Desc": "DRS Data in M From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_z_MATCH1[19:16]=0x8, Q_Py_PCI_PMON_PKT_z_MASK1[19:16]=0xF }) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0],QPIMask1[19:16],QPIMatch1[19:16]", }, "QPI_LL.DRS_PTL_CACHELINE_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Partial Cacheline Data Messges From QPI in bytes", "Desc": "DRS Partial Cacheline Data Messges From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1D00, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1F00}) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0]", }, "QPI_LL.DRS_WB_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback packets received from QPI in bytes. This is the sum of Wb{I,S,E} DRS packets", "Desc": "DRS Writeback From QPI", "Equation": "DRS_WbI_FROM_QPI + DRS_WbS_FROM_QPI + DRS_WbE_FROM_QPI", }, "QPI_LL.DRS_WRITE_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Data packets (Any - DataC) received from QPI sent to Node ID 'x'. 
Expressed in bytes", "Desc": "DRS Data From QPI To Node x", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0{[12:0],dnid}={0x1C00,x}, Q_Py_PCI_PMON_PKT_z_MASK0[17:0]=0x3FE00}) - (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0{[12:0],dnid}={0x1C00,x}, Q_Py_PCI_PMON_PKT_z_MASK0[17:0]=0x3FF80})) * 64", "Filter": "QPIMask0[17:0],QPIMatch0[17:0]", }, "QPI_LL.DRS_WbE_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to E state' packets received from QPI in bytes", "Desc": "DRS WbE From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1CC0, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0]", }, "QPI_LL.DRS_WbI_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to I state' packets received from QPI in bytes", "Desc": "DRS WbI From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1C80, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0]", }, "QPI_LL.DRS_WbS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to S state' packets received from QPI in bytes", "Desc": "DRS WbSFrom QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0[12:0]=0x1CA0, Q_Py_PCI_PMON_PKT_z_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0]", }, "QPI_LL.NCB_DATA_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "NCB Data packets (Any - Interrupts) received from QPI sent to Node ID 'x'. 
Expressed in bytes", "Desc": "NCB Data From QPI To Node x", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0{[12:0],dnid}={0x1800,x}, Q_Py_PCI_PMON_PKT_z_MASK0[17:0]=0x3FE00}) - (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_z_MATCH0{[12:0],dnid}={0x1900,x}, Q_Py_PCI_PMON_PKT_z_MASK0[17:0]=0x3FF80})) * 64", "Filter": "QPIMask0[17:0],QPIMatch0[17:0]", }, "QPI_LL.NCB_DATA_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Defn": "NCB Data Messages From QPI in bytes", "Desc": "NCB Data Messages From QPI", "Equation": "(RxL_FLITS_G2.NCB_DATA * 8)", }, "QPI_LL.PCT_LINK_FULL_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Defn": "Percent of Cycles the QPI link is at Full Power", "Desc": "Percent Link Full Power Cycles", "Equation": "RxL0_POWER_CYCLES / CLOCKTICKS", "Obscure": 1, }, "QPI_LL.PCT_LINK_HALF_DISABLED_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Defn": "Percent of Cycles the QPI link in power mode where half of the lanes are disabled.", "Desc": "Percent Link Half Disabled Cycles", "Equation": "RxL0P_POWER_CYCLES / CLOCKTICKS", "Obscure": 1, }, "QPI_LL.PCT_LINK_SHUTDOWN_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER Events", "Defn": "Percent of Cycles the QPI link is Shutdown", "Desc": "Percent Link Shutdown Cycles", "Equation": "L1_POWER_CYCLES / CLOCKTICKS", "Obscure": 1, }, "QPI_LL.QPI_DATA_BW": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Defn": "QPI data transmit bandwidth in Bytes", "Desc": "QPI Data Bandwidth", "Equation": "TxL_FLITS_G0.DATA * 8", }, "QPI_LL.QPI_LINK_BW": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Defn": "QPI total transmit bandwidth in Bytes (includes control)", "Desc": "QPI Link Bandwidth", "Equation": "(TxL_FLITS_G0.DATA + TxL_FLITS_G0.NON_DATA) * 8", }, "QPI_LL.QPI_LINK_UTIL": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Defn": "Percentage of cycles that QPI Link was utilized. 
Calculated from 1 - Number of idle flits - time the link was 'off'", "Desc": "QPI Link Utilization", "Equation": "(RxL_FLITS_G0.DATA + RxL_FLITS_G0.NON_DATA) / (2 * CLOCKTICKS)", }, "QPI_LL.QPI_SPEED": { "Box": "QPI_LL", "Category": "QPI_LL CFCLK Events", "Defn": "QPI Speed - In GT/s (GigaTransfers / Second) - Max QPI Bandwidth is 2 * ROUND ( QPI Speed , 0)", "Desc": "QPI Speed", "Equation": "ROUND (( CLOCKTICKS / TSC ) * TSC_SPEED, 0 ) * ( 8 / 1000)", }, # PCU: "PCU.PCT_CYC_FREQ_CURRENT_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by current", "Desc": "Percent Frequency Current Limited", "Equation": "FREQ_MAX_CURRENT_CYCLES / CLOCKTICKS", }, "PCU.PCT_CYC_FREQ_OS_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by the OS", "Desc": "Percent Frequency OS Limited", "Equation": "FREQ_MAX_OS_CYCLES / CLOCKTICKS", }, "PCU.PCT_CYC_FREQ_POWER_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by power", "Desc": "Percent Frequency Power Limited", "Equation": "FREQ_MAX_POWER_CYCLES / CLOCKTICKS", }, "PCU.PCT_CYC_FREQ_THERMAL_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by thermal issues", "Desc": "Percent Frequency Thermal Limited", "Equation": "FREQ_MAX_CURRENT_CYCLES / CLOCKTICKS", }, # CBO: "CBO.AVG_INGRESS_DEPTH": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Depth of the Ingress Queue through the sample interval", "Desc": "Average Ingress Depth", "Equation": "RxR_OCCUPANCY.IRQ / SAMPLE_INTERVAL", "Obscure": 1, }, "CBO.AVG_INGRESS_LATENCY": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks", "Desc": "Average Ingress Latency", "Equation": "RxR_OCCUPANCY.IRQ / RxR_INSERTS.IRQ", 
"Obscure": 1, }, "CBO.AVG_INGRESS_LATENCY_WHEN_NE": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks when Ingress Queue has at least one entry", "Desc": "Average Latency in Non-Empty Ingress", "Equation": "RxR_OCCUPANCY.IRQ / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}", "Obscure": 1, }, "CBO.AVG_TOR_DRDS_MISS_WHEN_NE": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Number of Data Read Entries that Miss the LLC when the TOR is not empty.", "Desc": "Average Data Read Misses in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRDS_WHEN_NE": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Number of Data Read Entries when the TOR is not empty.", "Desc": "Average Data Reads in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.OPCODE / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_HIT_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that hit the LLC", "Desc": "Data Read Hit Latency through TOR", "Equation": "((TOR_OCCUPANCY.OPCODE - TOR_OCCUPANCY.MISS_OPCODE) / (TOR_INSERTS.OPCODE - TOR_INSERTS.MISS_OPCODE)) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Read Entries making their way through the TOR", "Desc": "Data Read Latency through TOR", "Equation": "(TOR_OCCUPANCY.OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_LOC_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC and were satsified by Local Memory", 
"Desc": "Data Read Local Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.{opc,nid}={0x182,my_node}", "Filter": "CBoFilter1[28:20], CBoFilter1[15:0]", }, "CBO.AVG_TOR_DRD_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC", "Desc": "Data Read Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x182", "Filter": "CBoFilter1[28:20]", }, "CBO.AVG_TOR_DRD_REM_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC and were satsified by a Remote cache or Remote Memory", "Desc": "Data Read Remote Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER.{opc,nid}={0x182,other_nodes}", "Filter": "CBoFilter1[28:20], CBoFilter1[15:0]", }, "CBO.CYC_INGRESS_BLOCKED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Cycles the Ingress Request Queue arbiter was Blocked", "Desc": "Cycles Ingress Blocked", "Equation": "RxR_EXT_STARVED.IRQ / SAMPLE_INTERVAL", "Obscure": 1, }, "CBO.CYC_USED_DN": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used Down and Even", "Equation": "RING_BL_USED.CCW / SAMPLE_INTERVAL", "Obscure": 1, }, "CBO.CYC_USED_UP": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.CW / SAMPLE_INTERVAL", "Obscure": 1, }, "CBO.FAST_STR_LLC_MISS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of ItoM (fast string) operations that miss the LLC", "Desc": "Fast String misses", "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8", "Filter": "CBoFilter1[28:20]", }, 
"CBO.FAST_STR_LLC_REQ": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of ItoM (fast string) operations that reference the LLC", "Desc": "Fast String operations", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x1C8", "Filter": "CBoFilter1[28:20]", }, "CBO.INGRESS_REJ_V_INS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Ratio of Ingress Request Entries that were rejected vs. inserted", "Desc": "Ingress Rejects vs. Inserts", "Equation": "RxR_INSERTS.IRQ_REJ / RxR_INSERTS.IRQ", "Obscure": 1, }, "CBO.IO_READ_BW": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "IO Read Bandwidth in MB - Disk or Network Reads", "Desc": "IO Read Bandwidth", "Equation": "(TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x19C + TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x1E6) * 64 / 1000000", "Filter": "CBoFilter1[28:20]", }, "CBO.IO_WRITE_BW": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "IO Write Bandwidth in MB - Disk or Network Writes", "Desc": "IO Write Bandwidth", "Equation": "(TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x19E + TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x1E4) * 64 / 1000000", "Filter": "CBoFilter1[28:20]", }, "CBO.LLC_DRD_MISS_PCT": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "LLC Data Read miss ratio", "Desc": "LLC DRD Miss Ratio", "Equation": "LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER0.state=0x1 / LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER.state=0x3F", "Filter": "CBoFilter0[23:17]", }, "CBO.LLC_DRD_RFO_MISS_TO_LOC_MEM": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC Data Read and RFO misses satisfied by local memory.", "Desc": "LLC DRD+RFO Misses to Local Memory", "Equation": "(TOR_INSERTS.NID_MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER1.{opc,nid}={0x182,my_node} + TOR_INSERTS.NID_MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.{opc,nid}={0x180,my_node} ) / (TOR_INSERTS.NID_MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.{opc,nid}={0x182,0xF} + 
TOR_INSERTS.NID_MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.{opc,nid}={0x180,0xF} )", "Filter": "CBoFilter1[28:20], CBoFilter1[15:0]", }, "CBO.LLC_DRD_RFO_MISS_TO_REM_MEM": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC Data Read and RFO misses satisfied by a remote cache or remote memory.", "Desc": "LLC DRD+RFO Misses to Remote Memory", "Equation": "(TOR_INSERTS.NID_MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER1.{opc,nid}={0x182,other_nodes} + TOR_INSERTS.NID_MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.{opc,nid}={0x180,other_nodes} ) / (TOR_INSERTS.NID_MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.{opc,nid}={0x182,0xF} + TOR_INSERTS.NID_MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.{opc,nid}={0x180,0xF} )", "Filter": "CBoFilter1[28:20], CBoFilter1[15:0]", }, "CBO.LLC_PCIE_DATA_BYTES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC write miss (disk/network reads) bandwidth in MB", "Desc": "LLC Miss Data from PCIe", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x19C * 64", "Filter": "CBoFilter1[28:20]", }, "CBO.LLC_RFO_MISS_PCT": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC RFO Miss Ratio", "Desc": "LLC RFO Miss Ratio", "Equation": "(TOR_INSERTS.MISS_OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER1.opc=0x180", "Filter": "CBoFilter1[28:20]", }, "CBO.MEM_WB_BYTES": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "Data written back to memory in Number of Bytes", "Desc": "Memory Writebacks", "Equation": "LLC_VICTIMS.M_STATE * 64", }, "CBO.PARTIAL_PCI_READS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of partial PCI reads", "Desc": "Partial PCI Reads", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x195", "Filter": "CBoFilter1[28:20]", }, "CBO.PARTIAL_PCI_WRITES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of partial PCI writes", "Desc": "Partial PCI Writes", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x1E5", "Filter": "CBoFilter1[28:20]", }, 
"CBO.PCIE_DATA_BYTES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Data from PCIe in Number of Bytes", "Desc": "PCIe Data Traffic", "Equation": "(TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x194 + TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19C) * 64", "Filter": "CBoFilter1[28:20]", }, "CBO.RING_THRU_DN_BYTES": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", "Desc": "Ring Throughput Down and Even", "Equation": "RING_BL_USED.CCW* 32", "Obscure": 1, }, "CBO.RING_THRU_UP_BYTES": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": "Ring Throughput Up and Even", "Equation": "RING_BL_USED.CW * 32", "Obscure": 1, }, "CBO.STREAMED_FULL_STORES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of Streamed Store (of Full Cache Line) Transactions", "Desc": "Streaming Stores (Full Line)", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x18C", "Filter": "CBoFilter1[28:20]", }, "CBO.STREAMED_PART_STORES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Number of Streamed Store (of Partial Cache Line) Transactions", "Desc": "Streaming Stores (Partial Line)", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x18D", "Filter": "CBoFilter1[28:20]", }, "CBO.UC_READS": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Uncachable Read Transactions", "Desc": "Uncacheable Reads", "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER1.opc=0x187", "Filter": "CBoFilter1[28:20]", }, } categories = ( "CBO CACHE Events", "CBO EGRESS Events", "CBO INGRESS Events", "CBO INGRESS_RETRY Events", "CBO MISC Events", "CBO OCCUPANCY Events", "CBO RING Events", "CBO TOR Events", "CBO UCLK Events", "HA ADDR_OPCODE_MATCH Events", "HA BL_EGRESS Events", "HA BT (Backup Tracker) Events", "HA BYPASS Events", "HA CONFLICTS Events", "HA DIRECT2CORE Events", "HA DIRECTORY 
Events", "HA EGRESS Events", "HA IMC_MISC Events", "HA IMC_READS Events", "HA IMC_WRITES Events", "HA IODC Events", "HA OSB (Opportunistic Snoop Broadcast) Events", "HA OUTBOUND_TX Events", "HA QPI_IGR_CREDITS Events", "HA REQUESTS Events", "HA RING Events", "HA RPQ_CREDITS Events", "HA SNP_RESP Events", "HA TAD Events", "HA TRACKER Events", "HA UCLK Events", "HA WPQ_CREDITS Events", "IRP ADDRESS_MATCH Events", "IRP AK_INGRESS Events", "IRP BL_INGRESS_DRS Events", "IRP BL_INGRESS_NCB Events", "IRP BL_INGRESS_NCS Events", "IRP IO_CLKS Events", "IRP OUTBOUND_REQUESTS Events", "IRP STALL_CYCLES Events", "IRP TICKLES Events", "IRP TRANSACTIONS Events", "IRP WRITE_CACHE Events", "PCU CORE_C_STATE_TRANSITION Events", "PCU Delayed C-State Events", "PCU FREQ_MAX_LIMIT Events", "PCU FREQ_MIN_LIMIT Events", "PCU FREQ_RESIDENCY Events", "PCU FREQ_TRANS Events", "PCU MEMORY_PHASE_SHEDDING Events", "PCU PCLK Events", "PCU PKG_C_STATE_TRANSITION Events", "PCU POWER_STATE_OCC Events", "PCU PROCHOT Events", "PCU VOLT_TRANS Events", "PCU VR_HOT Events", "QPI_LL CFCLK Events", "QPI_LL CTO Events", "QPI_LL DIRECT2CORE Events", "QPI_LL FLITS_RX Events", "QPI_LL FLITS_TX Events", "QPI_LL POWER Events", "QPI_LL POWER_RX Events", "QPI_LL POWER_TX Events", "QPI_LL R3QPI_EGRESS_CREDITS Events", "QPI_LL RXQ Events", "QPI_LL RX_CREDITS_CONSUMED Events", "QPI_LL TXQ Events", "QPI_LL VNA_CREDIT_RETURN Events", "R2PCIe EGRESS Events", "R2PCIe INGRESS Events", "R2PCIe RING Events", "R2PCIe UCLK Events", "R3QPI EGRESS Credit Events", "R3QPI EGRESS Events", "R3QPI INGRESS Events", "R3QPI LINK_VN0_CREDITS Events", "R3QPI LINK_VN1_CREDITS Events", "R3QPI LINK_VNA_CREDITS Events", "R3QPI RING Events", "R3QPI UCLK Events", "UBOX EVENT_MSG Events", "UBOX LOCK Events", "UBOX PHOLD Events", "UBOX RACU Events", "iMC ACT Events", "iMC BYPASS Command Events", "iMC CAS Events", "iMC DCLK Events", "iMC DRAM_PRE_ALL Events", "iMC DRAM_REFRESH Events", "iMC ECC Events", "iMC MAJOR_MODES Events", "iMC POWER 
Events", "iMC PRE Events", "iMC PREEMPTION Events", "iMC RPQ Events", "iMC VMSE Events", "iMC WPQ Events", );
643,426
Python
.py
11,174
47.662341
1,158
0.602252
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,961
jkt_uc.py
andikleen_pmu-tools/ucevent/jkt_uc.py
# Support for Intel Xeon E5 2600 series uncore monitoring # see http://www.intel.com/content/dam/www/public/us/en/documents/design-guides/xeon-e5-2600-uncore-guide.pdf # for more details on the events and formulas. # aliases aliases = { "QPIMatch1": "Q_Py_PCI_PMON_PKT_MATCH1", "QPIMask0": "Q_Py_PCI_PMON_PKT_MASK0", "QPIMatch0": "Q_Py_PCI_PMON_BOX_MATCH0", "PCUFilter": "PCU_MSR_PMON_BOX_FILTER", "CBoFilter": "Cn_MSR_PMON_BOX_FILTER", "QPIMask1": "Q_Py_PCI_PMON_PKT_MASK1", } events = { # R3QPI: "R3QPI.CLOCKTICKS": { "Box": "R3QPI", "Category": "R3QPI UCLK Events", "Counters": "0-2", "Defn": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Number of uclks in domain", "EvSel": 1, }, "R3QPI.IIO_CREDITS_ACQUIRED": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.", "Desc": "to IIO BL Credit Acquired", "EvSel": 32, }, "R3QPI.IIO_CREDITS_ACQUIRED.NCS": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). 
This event can only track one message class at a time.", "Desc": "to IIO BL Credit Acquired", "EvSel": 32, "Umask": "bxx1xxxxx", }, "R3QPI.IIO_CREDITS_ACQUIRED.NCB": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.", "Desc": "to IIO BL Credit Acquired", "EvSel": 32, "Umask": "bxxx1xxxx", }, "R3QPI.IIO_CREDITS_ACQUIRED.DRS": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.", "Desc": "to IIO BL Credit Acquired", "EvSel": 32, "Umask": "bxxxx1xxx", }, "R3QPI.IIO_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). 
This event can only track one message class at a time.", "Desc": "to IIO BL Credit Rejected", "EvSel": 33, }, "R3QPI.IIO_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.", "Desc": "to IIO BL Credit Rejected", "EvSel": 33, "Umask": "bxx1xxxxx", }, "R3QPI.IIO_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.", "Desc": "to IIO BL Credit Rejected", "EvSel": 33, "Umask": "bxxx1xxxx", }, "R3QPI.IIO_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). 
NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.", "Desc": "to IIO BL Credit Rejected", "EvSel": 33, "Umask": "bxxxx1xxx", }, "R3QPI.IIO_CREDITS_USED": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.", "Desc": "to IIO BL Credit In Use", "EvSel": 34, }, "R3QPI.IIO_CREDITS_USED.NCS": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.", "Desc": "to IIO BL Credit In Use", "EvSel": 34, "Umask": "bxx1xxxxx", }, "R3QPI.IIO_CREDITS_USED.NCB": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). 
This event can only track one message class at a time.", "Desc": "to IIO BL Credit In Use", "EvSel": 34, "Umask": "bxxx1xxxx", }, "R3QPI.IIO_CREDITS_USED.DRS": { "Box": "R3QPI", "Category": "R3QPI IIO_CREDITS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.", "Desc": "to IIO BL Credit In Use", "EvSel": 34, "Umask": "bxxxx1xxx", }, "R3QPI.RING_AD_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, }, "R3QPI.RING_AD_USED.CW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "Umask": "bxxxxxxx1", }, "R3QPI.RING_AD_USED.CCW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "Umask": "bxxxxx1xx", }, "R3QPI.RING_AD_USED.CW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "Umask": "bxxxxxx1x", }, "R3QPI.RING_AD_USED.CCW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 AD Ring in Use", "EvSel": 7, "Umask": "bxxxx1xxx", }, "R3QPI.RING_AK_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, }, "R3QPI.RING_AK_USED.CW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "Umask": "bxxxxxxx1", }, "R3QPI.RING_AK_USED.CCW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "Umask": "bxxxxx1xx", }, "R3QPI.RING_AK_USED.CW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "Umask": "bxxxxxx1x", }, "R3QPI.RING_AK_USED.CCW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.", "Desc": "R3 AK Ring in Use", "EvSel": 8, "Umask": "bxxxx1xxx", }, "R3QPI.RING_BL_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, }, "R3QPI.RING_BL_USED.CW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "Umask": "bxxxxxxx1", }, "R3QPI.RING_BL_USED.CCW_EVEN": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "Umask": "bxxxxx1xx", }, "R3QPI.RING_BL_USED.CW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "Umask": "bxxxxxx1x", }, "R3QPI.RING_BL_USED.CCW_ODD": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R3 BL Ring in Use", "EvSel": 9, "Umask": "bxxxx1xxx", }, "R3QPI.RING_IV_USED": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R3 IV Ring in Use", "EvSel": 10, }, "R3QPI.RING_IV_USED.ANY": { "Box": "R3QPI", "Category": "R3QPI RING Events", "Counters": "0-2", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. 
Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R3 IV Ring in Use", "EvSel": 10, "Umask": "b00001111", }, "R3QPI.RxR_BYPASSED": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of times when the Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.", "Desc": "Ingress Bypassed", "EvSel": 18, }, "R3QPI.RxR_BYPASSED.AD": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of times when the Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.", "Desc": "Ingress Bypassed", "EvSel": 18, "Umask": "bxxxxxxx1", }, "R3QPI.RxR_CYCLES_NE": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, }, "R3QPI.RxR_CYCLES_NE.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "Umask": "bxx1xxxxx", }, "R3QPI.RxR_CYCLES_NE.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "Umask": "bxxx1xxxx", }, "R3QPI.RxR_CYCLES_NE.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "Umask": "bxxxx1xxx", }, "R3QPI.RxR_CYCLES_NE.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "Umask": "bxxxxxx1x", }, "R3QPI.RxR_CYCLES_NE.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "Umask": "bxxxxxxx1", }, "R3QPI.RxR_CYCLES_NE.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "Umask": "bxxxxx1xx", }, "R3QPI.RxR_INSERTS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, }, "R3QPI.RxR_INSERTS.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "Umask": "bxx1xxxxx", }, "R3QPI.RxR_INSERTS.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. 
This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "Umask": "bxxx1xxxx", }, "R3QPI.RxR_INSERTS.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "Umask": "bxxxx1xxx", }, "R3QPI.RxR_INSERTS.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "Umask": "bxxxxxx1x", }, "R3QPI.RxR_INSERTS.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "Umask": "bxxxxxxx1", }, "R3QPI.RxR_INSERTS.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Allocations", "EvSel": 17, "Umask": "bxxxxx1xx", }, "R3QPI.RxR_OCCUPANCY": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "MaxIncCyc": 32, "SubCtr": 1, }, "R3QPI.RxR_OCCUPANCY.NCS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "MaxIncCyc": 32, "SubCtr": 1, "Umask": "bxx1xxxxx", }, "R3QPI.RxR_OCCUPANCY.NCB": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. 
This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "MaxIncCyc": 32, "SubCtr": 1, "Umask": "bxxx1xxxx", }, "R3QPI.RxR_OCCUPANCY.DRS": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "MaxIncCyc": 32, "SubCtr": 1, "Umask": "bxxxx1xxx", }, "R3QPI.RxR_OCCUPANCY.SNP": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "MaxIncCyc": 32, "SubCtr": 1, "Umask": "bxxxxxx1x", }, "R3QPI.RxR_OCCUPANCY.HOM": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "MaxIncCyc": 32, "SubCtr": 1, "Umask": "bxxxxxxx1", }, "R3QPI.RxR_OCCUPANCY.NDR": { "Box": "R3QPI", "Category": "R3QPI INGRESS Events", "Counters": 0, "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. 
This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.", "Desc": "Ingress Occupancy Accumulator", "EvSel": 19, "MaxIncCyc": 32, "SubCtr": 1, "Umask": "bxxxxx1xx", }, "R3QPI.TxR_CYCLES_FULL": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, }, "R3QPI.TxR_CYCLES_NE": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the QPI Egress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, }, "R3QPI.TxR_INSERTS": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Defn": "Counts the number of allocations into the QPI Egress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Egress Occupancy Accumulator event in order to calculate average queue latency. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Allocations", "EvSel": 36, }, "R3QPI.TxR_NACK": { "Box": "R3QPI", "Category": "R3QPI EGRESS Events", "Counters": "0-1", "Desc": "Egress NACK", "EvSel": 38, }, "R3QPI.VN0_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. 
In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, }, "R3QPI.VN0_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "Umask": "bxx1xxxxx", }, "R3QPI.VN0_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. 
Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "Umask": "bxxx1xxxx", }, "R3QPI.VN0_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "Umask": "bxxxx1xxx", }, "R3QPI.VN0_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. 
This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "Umask": "bxxxxxx1x", }, "R3QPI.VN0_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "Umask": "bxxxxxxx1", }, "R3QPI.VN0_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.", "Desc": "VN0 Credit Acquisition Failed on DRS", "EvSel": 55, "Umask": "bxxxxx1xx", }, "R3QPI.VN0_CREDITS_USED": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. 
In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, }, "R3QPI.VN0_CREDITS_USED.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "Umask": "bxx1xxxxx", }, "R3QPI.VN0_CREDITS_USED.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. 
In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "Umask": "bxxx1xxxx", }, "R3QPI.VN0_CREDITS_USED.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "Umask": "bxxxx1xxx", }, "R3QPI.VN0_CREDITS_USED.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. 
In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "Umask": "bxxxxxx1x", }, "R3QPI.VN0_CREDITS_USED.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "Umask": "bxxxxxxx1", }, "R3QPI.VN0_CREDITS_USED.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VN0_CREDITS Events", "Counters": "0-1", "Defn": "Number of times a VN0 credit was used on the DRS message channel. 
In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 54, "Umask": "bxxxxx1xx", }, "R3QPI.VNA_CREDITS_ACQUIRED": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transfered in a given message class using an qfclk event.", "Desc": "VNA credit Acquisitions", "EvSel": 51, "MaxIncCyc": 4, }, "R3QPI.VNA_CREDITS_REJECT": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. 
Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, }, "R3QPI.VNA_CREDITS_REJECT.NCS": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "Umask": "bxx1xxxxx", }, "R3QPI.VNA_CREDITS_REJECT.NCB": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. 
The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "Umask": "bxxx1xxxx", }, "R3QPI.VNA_CREDITS_REJECT.DRS": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "Umask": "bxxxx1xxx", }, "R3QPI.VNA_CREDITS_REJECT.SNP": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "Umask": "bxxxxxx1x", }, "R3QPI.VNA_CREDITS_REJECT.HOM": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "Umask": "bxxxxxxx1", }, "R3QPI.VNA_CREDITS_REJECT.NDR": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. 
This can happen if the rest of the uncore is unable to drain the requests fast enough.", "Desc": "VNA Credit Reject", "EvSel": 52, "Umask": "bxxxxx1xx", }, "R3QPI.VNA_CREDIT_CYCLES_OUT": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI uclk cycles when the transmitted has no VNA credits available and therefore cannot send any requests on this channel. Note that this does not mean that no flits can be transmitted, as those holding VN0 credits will still (potentially) be able to transmit. Generally it is the goal of the uncore that VNA credits should not run out, as this can substantially throttle back useful QPI bandwidth.", "Desc": "Cycles with no VNA credits available", "EvSel": 49, }, "R3QPI.VNA_CREDIT_CYCLES_USED": { "Box": "R3QPI", "Category": "R3QPI LINK_VNA_CREDITS Events", "Counters": "0-1", "Defn": "Number of QPI uclk cycles with one or more VNA credits in use. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average number of used VNA credits.", "Desc": "Cycles with 1 or more VNA credits in use", "EvSel": 50, }, # CBO: "CBO.CLOCKTICKS": { "Box": "CBO", "Category": "CBO UCLK Events", "Counters": "0-3", "Desc": "Uncore Clocks", "EvSel": 0, }, "CBO.COUNTER0_OCCUPANCY": { "Box": "CBO", "Category": "CBO OCCUPANCY Events", "Counters": "1-3", "Defn": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. 
setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.", "Desc": "Counter 0 Occupancy", "EvSel": 31, "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.ISMQ_DRD_MISS_OCC": { "Box": "CBO", "Category": "CBO ISMQ Events", "Counters": "0-1", "EvSel": 33, "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.LLC_LOOKUP": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", }, "CBO.LLC_LOOKUP.DATA_READ": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. 
CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00000011", }, "CBO.LLC_LOOKUP.REMOTE_SNOOP": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00001001", }, "CBO.LLC_LOOKUP.WRITE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. 
This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. To monitor any lookup, set the field to 0x1F.", "Umask": "b00000101", }, "CBO.LLC_LOOKUP.NID": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.", "Desc": "Cache Lookups", "EvSel": 52, "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match a given state (or states). The state is programmed in Cn_MSR_PMON_BOX_FILTER.state. The state field is a bit mask, so you can select (and monitor) multiple states at a time. 0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F. For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field. 
To monitor any lookup, set the field to 0x1F.", "Umask": "b01000001", }, "CBO.LLC_VICTIMS": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, }, "CBO.LLC_VICTIMS.MISS": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "Umask": "bxxxx1xxx", }, "CBO.LLC_VICTIMS.NID": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "Umask": "bx1xxxxxx", }, "CBO.LLC_VICTIMS.S_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "Umask": "bxxxxx1xx", }, "CBO.LLC_VICTIMS.E_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "Umask": "bxxxxxx1x", }, "CBO.LLC_VICTIMS.M_STATE": { "Box": "CBO", "Category": "CBO CACHE Events", "Counters": "0-1", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "Umask": "bxxxxxxx1", }, "CBO.MISC": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-1", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, }, "CBO.MISC.RFO_HIT_S": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-1", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "Umask": "bxxxx1xxx", }, "CBO.MISC.RSPI_WAS_FSE": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-1", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "Umask": "bxxxxxxx1", }, "CBO.MISC.STARTED": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-1", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "Umask": "bxxxxx1xx", }, "CBO.MISC.WC_ALIASING": { "Box": "CBO", "Category": "CBO MISC Events", "Counters": "0-1", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "Umask": "bxxxxxx1x", }, "CBO.RING_AD_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, }, "CBO.RING_AD_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "Umask": "bxxxxxx1x", }, "CBO.RING_AD_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "Umask": "bxxxx1xxx", }, "CBO.RING_AD_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "Umask": "bxxxxx1xx", }, "CBO.RING_AD_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AD Ring In Use", "EvSel": 27, "Umask": "bxxxxxxx1", }, "CBO.RING_AK_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, }, "CBO.RING_AK_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "Umask": "bxxxxxx1x", }, "CBO.RING_AK_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "Umask": "bxxxx1xxx", }, "CBO.RING_AK_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "Umask": "bxxxxx1xx", }, "CBO.RING_AK_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "AK Ring In Use", "EvSel": 28, "Umask": "bxxxxxxx1", }, "CBO.RING_BL_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, }, "CBO.RING_BL_USED.UP_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "Umask": "bxxxxxx1x", }, "CBO.RING_BL_USED.DOWN_ODD": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "Umask": "bxxxx1xxx", }, "CBO.RING_BL_USED.DOWN_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "Umask": "bxxxxx1xx", }, "CBO.RING_BL_USED.UP_EVEN": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "BL Ring in Use", "EvSel": 29, "Umask": "bxxxxxxx1", }, "CBO.RING_BOUNCES": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, }, "CBO.RING_BOUNCES.IV_CORE": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "Umask": "bxxxx1xxx", }, "CBO.RING_BOUNCES.AK_CORE": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "Umask": "bxxxxxx1x", }, "CBO.RING_BOUNCES.BL_CORE": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "Desc": "Number of LLC responses that bounced on the Ring.", "EvSel": 5, "Umask": "bxxxxx1xx", }, "CBO.RING_IV_USED": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in JKT. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DOWN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DOWN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, }, "CBO.RING_IV_USED.ANY": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "2-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in JKT. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DOWN_EVEN. 
To monitor the \"Odd\" ring, they should select both UP_ODD and DOWN_ODD.", "Desc": "BL Ring in Use", "EvSel": 30, "Umask": "b00001111", }, "CBO.RING_SRC_THRTL": { "Box": "CBO", "Category": "CBO RING Events", "Counters": "0-1", "EvSel": 7, }, "CBO.RxR_EXT_STARVED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, }, "CBO.RxR_EXT_STARVED.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "Umask": "bxxxxxx1x", }, "CBO.RxR_EXT_STARVED.ISMQ_BIDS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "Umask": "bxxxx1xxx", }, "CBO.RxR_EXT_STARVED.ISMQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "Umask": "bxxxxx1xx", }, "CBO.RxR_EXT_STARVED.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts cycles in external starvation. 
This occurs when one of the ingress queues is being starved by the other queues.", "Desc": "Ingress Arbiter Blocking Cycles", "EvSel": 18, "Umask": "bxxxxxxx1", }, "CBO.RxR_INSERTS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", }, "CBO.RxR_INSERTS.VFIFO": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxx1xxxx", }, "CBO.RxR_INSERTS.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxx1xx", }, "CBO.RxR_INSERTS.IRQ_REJECTED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxx1x", }, "CBO.RxR_INSERTS.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": "0-1", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress Allocations", "EvSel": 19, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxxx1", }, "CBO.RxR_IPQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a snoop (probe) request had to retry. 
Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, }, "CBO.RxR_IPQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "Umask": "bxxx1xxxx", }, "CBO.RxR_IPQ_RETRY.ADDR_CONFLICT": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "Umask": "bxxxxx1xx", }, "CBO.RxR_IPQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "Umask": "bxxxxxxx1", }, "CBO.RxR_IPQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a snoop (probe) request had to retry. 
Filters exist to cover some of the common cases retries.", "Desc": "Probe Queue Retries", "EvSel": 49, "Umask": "bxxxxxx1x", }, "CBO.RxR_IRQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, }, "CBO.RxR_IRQ_RETRY.RTID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "Umask": "bxxxx1xxx", }, "CBO.RxR_IRQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "Umask": "bxxx1xxxx", }, "CBO.RxR_IRQ_RETRY.ADDR_CONFLICT": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "Umask": "bxxxxx1xx", }, "CBO.RxR_IRQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "Umask": "bxxxxxxx1", }, "CBO.RxR_IRQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Desc": "Ingress Request Queue Rejects", "EvSel": 50, "Umask": "bxxxxxx1x", }, "CBO.RxR_ISMQ_RETRY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, }, "CBO.RxR_ISMQ_RETRY.RTID": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "Umask": "bxxxx1xxx", }, "CBO.RxR_ISMQ_RETRY.QPI_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "Umask": "bxxx1xxxx", }, "CBO.RxR_ISMQ_RETRY.ANY": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "Umask": "bxxxxxxx1", }, "CBO.RxR_ISMQ_RETRY.IIO_CREDITS": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "Umask": "bxx1xxxxx", }, "CBO.RxR_ISMQ_RETRY.FULL": { "Box": "CBO", "Category": "CBO INGRESS_RETRY Events", "Counters": "0-1", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 51, "Umask": "bxxxxxx1x", }, "CBO.RxR_OCCUPANCY": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, }, "CBO.RxR_OCCUPANCY.VFIFO": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "bxxx1xxxx", }, "CBO.RxR_OCCUPANCY.IPQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "bxxxxx1xx", }, "CBO.RxR_OCCUPANCY.IRQ_REJECTED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "bxxxxxx1x", }, "CBO.RxR_OCCUPANCY.IRQ": { "Box": "CBO", "Category": "CBO INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress Occupancy", "EvSel": 17, "MaxIncCyc": 20, "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "SubCtr": 1, "Umask": "bxxxxxxx1", }, "CBO.TOR_INSERTS": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match 
qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, }, "CBO.TOR_INSERTS.NID_MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b01001010", }, "CBO.TOR_INSERTS.NID_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b01000001", }, "CBO.TOR_INSERTS.MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b00000011", }, "CBO.TOR_INSERTS.NID_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b01001000", }, "CBO.TOR_INSERTS.NID_EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b01000100", }, "CBO.TOR_INSERTS.NID_MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b01000011", }, "CBO.TOR_INSERTS.EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b00000100", }, "CBO.TOR_INSERTS.WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b00010000", }, "CBO.TOR_INSERTS.NID_WB": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b01010000", }, "CBO.TOR_INSERTS.OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b00000001", }, "CBO.TOR_INSERTS.MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": "0-1", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).", "Desc": "TOR Inserts", "EvSel": 53, "Umask": "b00001010", }, "CBO.TOR_OCCUPANCY": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, }, "CBO.TOR_OCCUPANCY.NID_MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01001010", }, "CBO.TOR_OCCUPANCY.NID_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000001", }, "CBO.TOR_OCCUPANCY.MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000011", }, "CBO.TOR_OCCUPANCY.ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00001000", }, "CBO.TOR_OCCUPANCY.NID_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. 
If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01001000", }, "CBO.TOR_OCCUPANCY.NID_EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000100", }, "CBO.TOR_OCCUPANCY.NID_MISS_OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b01000011", }, "CBO.TOR_OCCUPANCY.EVICTION": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. 
Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000100", }, "CBO.TOR_OCCUPANCY.OPCODE": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00000001", }, "CBO.TOR_OCCUPANCY.MISS_ALL": { "Box": "CBO", "Category": "CBO TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)", "Desc": "TOR Occupancy", "EvSel": 54, "MaxIncCyc": 20, "SubCtr": 1, "Umask": "b00001010", }, "CBO.TxR_ADS_USED": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "EvSel": 4, }, "CBO.TxR_INSERTS": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, }, "CBO.TxR_INSERTS.BL_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "Umask": "bxxxxx1xx", }, "CBO.TxR_INSERTS.AK_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "Umask": "bxx1xxxxx", }, "CBO.TxR_INSERTS.AD_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "Umask": "bxxx1xxxx", }, "CBO.TxR_INSERTS.IV_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "Umask": "bxxxx1xxx", }, "CBO.TxR_INSERTS.BL_CORE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "Umask": "bx1xxxxxx", }, "CBO.TxR_INSERTS.AK_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "Umask": "bxxxxxx1x", }, "CBO.TxR_INSERTS.AD_CACHE": { "Box": "CBO", "Category": "CBO EGRESS Events", "Counters": "0-1", "Defn": "Number of allocations into the Cbo Egress. 
The Egress is used to queue up requests destined for the ring.", "Desc": "Egress Allocations", "EvSel": 2, "Umask": "bxxxxxxx1", }, # HA: "HA.ADDR_OPC_MATCH": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, }, "HA.ADDR_OPC_MATCH.FILT": { "Box": "HA", "Category": "HA ADDR_OPCODE_MATCH Events", "Counters": "0-3", "Desc": "QPI Address/Opcode Match", "EvSel": 32, "Umask": "b00000011", }, "HA.CLOCKTICKS": { "Box": "HA", "Category": "HA UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.", "Desc": "uclks", "EvSel": 0, }, "HA.CONFLICT_CYCLES": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": "0-3", "Desc": "Conflict Checks", "EvSel": 11, "Broken": 1, }, "HA.CONFLICT_CYCLES.CONFLICT": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": "0-3", "Desc": "Conflict Checks", "EvSel": 11, "Umask": "bxxxxxx1x", }, "HA.CONFLICT_CYCLES.NO_CONFLICT": { "Box": "HA", "Category": "HA CONFLICTS Events", "Counters": "0-3", "Desc": "Conflict Checks", "EvSel": 11, "Umask": "bxxxxxxx1", "Broken": 1, }, "HA.DIRECT2CORE_COUNT": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of Direct2Core messages sent", "Desc": "Direct2Core Messages Sent", "EvSel": 17, "Broken": 1, }, "HA.DIRECT2CORE_CYCLES_DISABLED": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of cycles in which Direct2Core was disabled", "Desc": "Cycles when Direct2Core was Disabled", "EvSel": 18, "Obscure": 1, "Broken": 1, }, "HA.DIRECT2CORE_TXN_OVERRIDE": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Counters": "0-3", "Defn": "Number of Reads where Direct2Core overridden", 
"Desc": "Number of Reads that had Direct2Core Overridden", "EvSel": 19, "Broken": 1, }, "HA.DIRECTORY_LOOKUP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "Notes": "Only valid for parts that implement the Directory", "Broken": 1, }, "HA.DIRECTORY_LOOKUP.NO_SNP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", "Broken": 1, }, "HA.DIRECTORY_LOOKUP.SNP": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 12, "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", "Broken": 1, }, "HA.DIRECTORY_UPDATE": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "Notes": "Only valid for parts that implement the Directory", "Broken": 1, }, "HA.DIRECTORY_UPDATE.SET": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. 
This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", "Broken": 1, }, "HA.DIRECTORY_UPDATE.ANY": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx11", "Broken": 1, }, "HA.DIRECTORY_UPDATE.CLEAR": { "Box": "HA", "Category": "HA DIRECTORY Events", "Counters": "0-3", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.", "Desc": "Directory Updates", "EvSel": 13, "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", "Broken": 1, }, "HA.IGR_NO_CREDIT_CYCLES": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI1": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "Umask": "bxxxxxx1x", }, "HA.IGR_NO_CREDIT_CYCLES.AD_QPI0": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. 
This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "Umask": "bxxxxxxx1", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI1": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "Umask": "bxxxx1xxx", }, "HA.IGR_NO_CREDIT_CYCLES.BL_QPI0": { "Box": "HA", "Category": "HA QPI_IGR_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.", "Desc": "Cycles without QPI Ingress Credits", "EvSel": 34, "Umask": "bxxxxx1xx", }, "HA.IMC_RETRY": { "Box": "HA", "Category": "HA IMC_MISC Events", "Counters": "0-3", "Desc": "Retry Events", "EvSel": 30, }, "HA.IMC_WRITES": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, }, "HA.IMC_WRITES.PARTIAL_ISOCH": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "Umask": "bxxxx1xxx", }, "HA.IMC_WRITES.ALL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. 
It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "Umask": "b00001111", }, "HA.IMC_WRITES.PARTIAL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "Umask": "bxxxxxx1x", }, "HA.IMC_WRITES.FULL": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "Umask": "bxxxxxxx1", }, "HA.IMC_WRITES.FULL_ISOCH": { "Box": "HA", "Category": "HA IMC_WRITES Events", "Counters": "0-3", "Defn": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "HA to iMC Full Line Writes Issued", "EvSel": 26, "Umask": "bxxxxx1xx", }, "HA.REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, }, "HA.REQUESTS.READS": { "Box": "HA", "Category": "HA REQUESTS Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "Umask": "b00000011", }, "HA.REQUESTS.WRITES": { "Box": "HA", "Category": "HA REQUESTS Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 1, "Umask": "b00001100", }, "HA.RPQ_CYCLES_NO_REG_CREDITS": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "MaxIncCyc": 4, }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN1": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. 
This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "MaxIncCyc": 4, "Umask": "bxxxxxx1x", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN2": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN3": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. 
This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "MaxIncCyc": 4, "Umask": "bxxxx1xxx", }, "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN0": { "Box": "HA", "Category": "HA RPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "iMC RPQ Credits Empty - Regular", "EvSel": 21, "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, "HA.TAD_REQUESTS_G0": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "MaxIncCyc": 2, }, "HA.TAD_REQUESTS_G0.REGION0": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "HA.TAD_REQUESTS_G0.REGION7": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "MaxIncCyc": 2, "Umask": "b1xxxxxxx", }, "HA.TAD_REQUESTS_G0.REGION3": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. 
There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "HA.TAD_REQUESTS_G0.REGION4": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "MaxIncCyc": 2, "Umask": "bxxx1xxxx", }, "HA.TAD_REQUESTS_G0.REGION2": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "HA.TAD_REQUESTS_G0.REGION1": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "HA.TAD_REQUESTS_G0.REGION5": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "MaxIncCyc": 2, "Umask": "bxx1xxxxx", }, "HA.TAD_REQUESTS_G0.REGION6": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. 
There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 0", "EvSel": 27, "MaxIncCyc": 2, "Umask": "bx1xxxxxx", }, "HA.TAD_REQUESTS_G1": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "MaxIncCyc": 2, }, "HA.TAD_REQUESTS_G1.REGION9": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. 
It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "HA.TAD_REQUESTS_G1.REGION10": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "HA.TAD_REQUESTS_G1.REGION11": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "MaxIncCyc": 2, "Umask": "bxxxx1xxx", }, "HA.TAD_REQUESTS_G1.REGION8": { "Box": "HA", "Category": "HA TAD Events", "Counters": "0-3", "Defn": "Counts the number of HA requests to a given TAD region. 
There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.", "Desc": "HA Requests to a TAD Region - Group 1", "EvSel": 28, "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "HA.TRACKER_INSERTS": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the local HA tracker pool. This can be used in conjunction with the occupancy accumulation event in order to calculate average latency. One cannot filter between reads and writes. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Allocations", "EvSel": 6, }, "HA.TRACKER_INSERTS.ALL": { "Box": "HA", "Category": "HA TRACKER Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the local HA tracker pool. This can be used in conjunction with the occupancy accumulation event in order to calculate average latency. One cannot filter between reads and writes. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.", "Desc": "Tracker Allocations", "EvSel": 6, "Umask": "b00000011", }, "HA.TxR_AD": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. 
See the filter descriptions for more details.", "Desc": "Outbound NDR Ring Transactions", "EvSel": 15, }, "HA.TxR_AD.SNP": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.", "Desc": "Outbound NDR Ring Transactions", "EvSel": 15, "Umask": "bxxxxxx1x", }, "HA.TxR_AD.NDR": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.", "Desc": "Outbound NDR Ring Transactions", "EvSel": 15, "Umask": "bxxxxxxx1", }, "HA.TxR_AD_CYCLES_FULL": { "Box": "HA", "Category": "HA AD_EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, }, "HA.TxR_AD_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA AD_EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "Umask": "bxxxxxx1x", }, "HA.TxR_AD_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA AD_EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "Umask": "bxxxxxx11", }, "HA.TxR_AD_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA AD_EGRESS Events", "Counters": "0-3", "Defn": "AD Egress Full", "Desc": "AD Egress Full", "EvSel": 42, "Umask": "bxxxxxxx1", }, "HA.TxR_AK_CYCLES_FULL": { "Box": "HA", "Category": "HA AK_EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, }, "HA.TxR_AK_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA AK_EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "Umask": "bxxxxxx1x", }, "HA.TxR_AK_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA AK_EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", 
"Desc": "AK Egress Full", "EvSel": 50, "Umask": "bxxxxxx11", }, "HA.TxR_AK_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA AK_EGRESS Events", "Counters": "0-3", "Defn": "AK Egress Full", "Desc": "AK Egress Full", "EvSel": 50, "Umask": "bxxxxxxx1", }, "HA.TxR_AK_NDR": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of outbound NDR transactions sent on the AK ring. NDR stands for \"non-data response\" and is generally used for completions that do not include data. AK NDR is used for messages to the local socket.", "Desc": "Outbound NDR Ring Transactions", "EvSel": 14, }, "HA.TxR_BL": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, }, "HA.TxR_BL.DRS_QPI": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "Umask": "bxxxxx1xx", }, "HA.TxR_BL.DRS_CACHE": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "Umask": "bxxxxxxx1", }, "HA.TxR_BL.DRS_CORE": { "Box": "HA", "Category": "HA OUTBOUND_TX Events", "Counters": "0-3", "Defn": "Counts the number of DRS messages sent out on the BL ring. 
This can be filtered by the destination.", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 16, "Umask": "bxxxxxx1x", }, "HA.TxR_BL_CYCLES_FULL": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, }, "HA.TxR_BL_CYCLES_FULL.SCHED1": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "Umask": "bxxxxxx1x", }, "HA.TxR_BL_CYCLES_FULL.ALL": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "Umask": "bxxxxxx11", }, "HA.TxR_BL_CYCLES_FULL.SCHED0": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Counters": "0-3", "Defn": "BL Egress Full", "Desc": "BL Egress Full", "EvSel": 54, "Umask": "bxxxxxxx1", }, "HA.WPQ_CYCLES_NO_REG_CREDITS": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "MaxIncCyc": 4, }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN1": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "MaxIncCyc": 4, "Umask": "bxxxxxx1x", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN2": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "MaxIncCyc": 4, "Umask": "bxxxxx1xx", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN3": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "MaxIncCyc": 4, "Umask": "bxxxx1xxx", }, "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN0": { "Box": "HA", "Category": "HA WPQ_CREDITS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. 
One or more channels can be tracked at a given time.", "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular", "EvSel": 24, "MaxIncCyc": 4, "Umask": "bxxxxxxx1", }, # iMC: "iMC.ACT_COUNT": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, }, "iMC.CAS_COUNT": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, }, "iMC.CAS_COUNT.WR_RMM": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "Umask": "bxxxx1xxx", }, "iMC.CAS_COUNT.RD_UNDERFILL": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "Umask": "bxxxxxx1x", }, "iMC.CAS_COUNT.ALL": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "Umask": "b00001111", }, "iMC.CAS_COUNT.RD": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "Umask": "b00000011", }, "iMC.CAS_COUNT.RD_REG": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "Umask": "bxxxxxxx1", }, "iMC.CAS_COUNT.WR_WMM": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": 
"DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "Umask": "bxxxxx1xx", }, "iMC.CAS_COUNT.WR": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Defn": "DRAM RD_CAS and WR_CAS Commands", "Desc": "DRAM RD_CAS and WR_CAS Commands.", "EvSel": 4, "Umask": "b00001100", }, "iMC.DRAM_PRE_ALL": { "Box": "iMC", "Category": "iMC DRAM_PRE_ALL Events", "Counters": "0-3", "Defn": "Counts the number of times that the precharge all command was sent.", "Desc": "DRAM Precharge All Commands", "EvSel": 6, }, "iMC.DRAM_REFRESH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, }, "iMC.DRAM_REFRESH.PANIC": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "Umask": "bxxxxxx1x", }, "iMC.DRAM_REFRESH.HIGH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "Umask": "bxxxxx1xx", }, "iMC.ECC_CORRECTABLE_ERRORS": { "Box": "iMC", "Category": "iMC ECC Events", "Counters": "0-3", "Defn": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit erros in lockstep mode.", "Desc": "ECC Correctable Errors", "EvSel": 9, }, "iMC.MAJOR_MODES": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. 
Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, }, "iMC.MAJOR_MODES.ISOCH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "Umask": "bxxxx1xxx", }, "iMC.MAJOR_MODES.READ": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "Umask": "bxxxxxxx1", }, "iMC.MAJOR_MODES.PARTIAL": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "Umask": "bxxxxx1xx", }, "iMC.MAJOR_MODES.WRITE": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "Umask": "bxxxxxx1x", }, "iMC.POWER_CHANNEL_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.", "Desc": "Channel DLLOFF Cycles", "EvSel": 132, "Notes": "IBT = Input Buffer Termination = Off", }, "iMC.POWER_CHANNEL_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in PPD mode. 
If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.", "Desc": "Channel PPD Cycles", "EvSel": 133, "MaxIncCyc": 4, "Notes": "IBT = Input Buffer Termination = On", }, "iMC.POWER_CKE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "MaxIncCyc": 16, }, "iMC.POWER_CKE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "MaxIncCyc": 16, "Umask": "bxx1xxxxx", }, "iMC.POWER_CKE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "MaxIncCyc": 16, "Umask": "bx1xxxxxx", }, "iMC.POWER_CKE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "MaxIncCyc": 16, "Umask": "bxxxx1xxx", }, "iMC.POWER_CKE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "MaxIncCyc": 16, "Umask": "bxxx1xxxx", }, "iMC.POWER_CKE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "MaxIncCyc": 16, "Umask": "bxxxxxx1x", }, "iMC.POWER_CKE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "MaxIncCyc": 16, "Umask": "bxxxxxxx1", }, "iMC.POWER_CKE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "MaxIncCyc": 16, "Umask": "bxxxxx1xx", }, "iMC.POWER_CKE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "MaxIncCyc": 16, "Umask": "b1xxxxxxx", }, "iMC.POWER_CRITICAL_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.", "Desc": "Critical Throttle Cycles", "EvSel": 134, }, "iMC.POWER_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. 
Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.", "Desc": "Clock-Enabled Self-Refresh", "EvSel": 67, }, "iMC.POWER_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for any Rank", "EvSel": 65, }, "iMC.POWER_THROTTLE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 5", "EvSel": 65, "Umask": "bxx1xxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 6", "EvSel": 65, "Umask": "bx1xxxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. 
If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 3", "EvSel": 65, "Umask": "bxxxx1xxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 4", "EvSel": 65, "Umask": "bxxx1xxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 1", "EvSel": 65, "Umask": "bxxxxxx1x", }, "iMC.POWER_THROTTLE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "Umask": "bxxxxxxx1", }, "iMC.POWER_THROTTLE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. 
If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 2", "EvSel": 65, "Umask": "bxxxxx1xx", }, "iMC.POWER_THROTTLE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 7", "EvSel": 65, "Umask": "b1xxxxxxx", }, "iMC.PREEMPTION": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, }, "iMC.PREEMPTION.RD_PREEMPT_WR": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. 
This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "Umask": "bxxxxxx1x", }, "iMC.PREEMPTION.RD_PREEMPT_RD": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "Umask": "bxxxxxxx1", }, "iMC.PRE_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, }, "iMC.PRE_COUNT.PAGE_CLOSE": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "Umask": "bxxxxxx1x", }, "iMC.PRE_COUNT.PAGE_MISS": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "Umask": "bxxxxxxx1", }, "iMC.RPQ_CYCLES_FULL": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. 
We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.", "Desc": "Read Pending Queue Full Cycles", "EvSel": 18, }, "iMC.RPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.", "Desc": "Read Pending Queue Not Empty", "EvSel": 17, }, "iMC.RPQ_INSERTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.", "Desc": "Read Pending Queue Allocations", "EvSel": 16, }, "iMC.RPQ_OCCUPANCY": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Accumulates the occupancies of the Read Pending Queue each cycle. 
This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.", "Desc": "Read Pending Queue Occupancy", "EvSel": 128, "MaxIncCyc": 22, "SubCtr": 1, }, "iMC.WPQ_CYCLES_FULL": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.", "Desc": "Write Pending Queue Full Cycles", "EvSel": 34, }, "iMC.WPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. 
Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.", "Desc": "Write Pending Queue Not Empty", "EvSel": 33, }, "iMC.WPQ_INSERTS": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC.", "Desc": "Write Pending Queue Allocations", "EvSel": 32, }, "iMC.WPQ_OCCUPANCY": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. 
By using the \"not posted\" filter, we can track how long writes spent in the iMC before completions were sent to the HA. The \"posted\" filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.", "Desc": "Write Pending Queue Occupancy", "EvSel": 129, "MaxIncCyc": 32, "SubCtr": 1, }, "iMC.WPQ_READ_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 35, }, "iMC.WPQ_WRITE_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 36, }, # R2PCIe: "R2PCIe.CLOCKTICKS": { "Box": "R2PCIe", "Category": "R2PCIe UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the R2PCIe uclk domain. 
This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Number of uclks in domain", "EvSel": 1, }, "R2PCIe.RING_AD_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, }, "R2PCIe.RING_AD_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AD_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "Umask": "bxxxxx1xx", }, "R2PCIe.RING_AD_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AD_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AD Ring in Use", "EvSel": 7, "Umask": "bxxxx1xxx", }, "R2PCIe.RING_AK_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, }, "R2PCIe.RING_AK_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "Umask": "bxxxxxxx1", }, "R2PCIe.RING_AK_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "Umask": "bxxxxx1xx", }, "R2PCIe.RING_AK_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "Umask": "bxxxxxx1x", }, "R2PCIe.RING_AK_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 AK Ring in Use", "EvSel": 8, "Umask": "bxxxx1xxx", }, "R2PCIe.RING_BL_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, }, "R2PCIe.RING_BL_USED.CW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "Umask": "bxxxxxxx1", }, "R2PCIe.RING_BL_USED.CCW_EVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "Umask": "bxxxxx1xx", }, "R2PCIe.RING_BL_USED.CW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "Umask": "bxxxxxx1x", }, "R2PCIe.RING_BL_USED.CCW_ODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.", "Desc": "R2 BL Ring in Use", "EvSel": 9, "Umask": "bxxxx1xxx", }, "R2PCIe.RING_IV_USED": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Therefore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R2 IV Ring in Use", "EvSel": 10, }, "R2PCIe.RING_IV_USED.ANY": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. 
Therefore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.", "Desc": "R2 IV Ring in Use", "EvSel": 10, "Umask": "b00001111", }, "R2PCIe.RxR_AK_BOUNCES": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": 0, "Defn": "Counts the number of times when a request destined for the AK ingress bounced.", "Desc": "AK Ingress Bounced", "EvSel": 18, }, "R2PCIe.RxR_CYCLES_NE": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, }, "R2PCIe.RxR_CYCLES_NE.NCS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "Umask": "bxx1xxxxx", }, "R2PCIe.RxR_CYCLES_NE.NCB": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "Umask": "bxxx1xxxx", }, "R2PCIe.RxR_CYCLES_NE.DRS": { "Box": "R2PCIe", "Category": "R2PCIe INGRESS Events", "Counters": "0-1", "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "Ingress Cycles Not Empty", "EvSel": 16, "Umask": "bxxxx1xxx", }, "R2PCIe.TxR_CYCLES_FULL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, }, "R2PCIe.TxR_CYCLES_FULL.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_CYCLES_FULL.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_CYCLES_FULL.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.", "Desc": "Egress Cycles Full", "EvSel": 37, "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_CYCLES_NE": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. 
This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, }, "R2PCIe.TxR_CYCLES_NE.AK": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "Umask": "bxxxxxx1x", }, "R2PCIe.TxR_CYCLES_NE.BL": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "Umask": "bxxxxx1xx", }, "R2PCIe.TxR_CYCLES_NE.AD": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Desc": "Egress Cycles Not Empty", "EvSel": 35, "Umask": "bxxxxxxx1", }, "R2PCIe.TxR_INSERTS": { "Box": "R2PCIe", "Category": "R2PCIe EGRESS Events", "Counters": 0, "Defn": "Counts the number of allocations into the R2PCIe Egress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue latency. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "Egress Allocations", "EvSel": 36, }, # PCU: "PCU.CLOCKTICKS": { "Box": "PCU", "Category": "PCU PCLK Events", "Counters": "0-3", "Defn": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.", "Desc": "pclk Cycles", "EvSel": 0, }, "PCU.CORE0_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "EvSel": 3, "ExtSel": 1, "Notes": "This only tracks the hardware portion in the RCFSM (CFCFSM). This portion is just doing the core C state transition. It does not include any necessary frequency/voltage transitions.", }, "PCU.CORE1_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "ExtSel": 1, "EvSel": 4, }, "PCU.CORE2_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "ExtSel": 1, "EvSel": 5, }, "PCU.CORE3_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "ExtSel": 1, "EvSel": 6, }, "PCU.CORE4_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "ExtSel": 1, "EvSel": 7, }, "PCU.CORE5_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "ExtSel": 1, "EvSel": 8, }, "PCU.CORE6_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. There is one event per core.", "Desc": "Core C State Transition Cycles", "ExtSel": 1, "EvSel": 9, }, "PCU.CORE7_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions. 
There is one event per core.", "Desc": "Core C State Transition Cycles", "ExtSel": 1, "EvSel": 10, }, "PCU.DEMOTIONS_CORE0": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "ExtSel": 1, "EvSel": 30, "Filter": "PCUFilter[7:0]", }, "PCU.DEMOTIONS_CORE1": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 31, "Filter": "PCUFilter[7:0]", }, "PCU.DEMOTIONS_CORE2": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 32, }, "PCU.DEMOTIONS_CORE3": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 33, "Filter": "PCUFilter[7:0]", }, "PCU.DEMOTIONS_CORE4": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 34, "Filter": "PCUFilter[7:0]", }, "PCU.DEMOTIONS_CORE5": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 35, "Filter": "PCUFilter[7:0]", }, "PCU.DEMOTIONS_CORE6": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 36, "Filter": "PCUFilter[7:0]", }, 
"PCU.DEMOTIONS_CORE7": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Counts the number of times when a configurable cores had a C-state demotion", "Desc": "Core C State Demotions", "EvSel": 37, "Filter": "PCUFilter[7:0]", }, "PCU.FREQ_BAND0_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 11, "Filter": "PCUFilter[7:0]", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", }, "PCU.FREQ_BAND1_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. 
One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 12, "Filter": "PCUFilter[15:8]", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", }, "PCU.FREQ_BAND2_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 13, "Filter": "PCUFilter[23:16]", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. 
It is handled on fast path.", }, "PCU.FREQ_BAND3_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "EvSel": 14, "Filter": "PCUFilter[31:24]", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", }, "PCU.FREQ_MAX_CURRENT_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when current is the upper limit on frequency.", "Desc": "Current Strongest Upper Limit Cycles", "EvSel": 7, "Notes": "This is fast path, will clear our other limits when it happens. The slow loop portion, which covers the other limits, can double count EDP. Clearing should fix this up in the next fast path event, but this will happen. Add up all the cycles and it won't make sense, but the general distribution is true.", }, "PCU.FREQ_MAX_LIMIT_THERMAL_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when thermal conditions are the upper limit on frequency. 
This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.", "Desc": "Thermal Strongest Upper Limit Cycles", "EvSel": 4, }, "PCU.FREQ_MAX_OS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the OS is the upper limit on frequency.", "Desc": "OS Strongest Upper Limit Cycles", "EvSel": 6, "Notes": "Essentially, this event says the OS is getting the frequency it requested.", }, "PCU.FREQ_MAX_POWER_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when power is the upper limit on frequency.", "Desc": "Power Strongest Upper Limit Cycles", "EvSel": 5, }, "PCU.FREQ_MIN_IO_P_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MIN_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.", "Desc": "IO P Limit Strongest Lower Limit Cycles", "ExtSel": 1, "EvSel": 1, }, "PCU.FREQ_MIN_PERF_P_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MIN_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when Perf P Limit is preventing us from dropping the frequency lower. Perf P Limit is an algorithm that takes input from remote sockets when determining if a socket should drop it's frequency down. 
This is largely to minimize increases in snoop and remote read latencies.", "Desc": "Perf P Limit Strongest Lower Limit Cycles", "ExtSel": 1, "EvSel": 2, }, "PCU.FREQ_TRANS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.", "Desc": "Cycles spent changing Frequency", "ExtSel": 1, "EvSel": 0, }, "PCU.MEMORY_PHASE_SHEDDING_CYCLES": { "Box": "PCU", "Category": "PCU MEMORY_PHASE_SHEDDING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.", "Desc": "Memory Phase Shedding Cycles", "EvSel": 47, "Notes": "Is this the package C one? Yes", }, "PCU.POWER_STATE_OCCUPANCY": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C0", "EvSel": 128, "MaxIncCyc": 8, "SubCtr": 1, }, "PCU.POWER_STATE_OCCUPANCY.CORES_C3": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in C3. 
It can be used by itself to get the average number of cores in C3, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C3", "EvSel": 128, "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b10000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C0": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C0", "EvSel": 128, "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b01000000", }, "PCU.POWER_STATE_OCCUPANCY.CORES_C6": { "Box": "PCU", "Category": "PCU POWER_STATE_OCC Events", "Counters": "0-3", "Defn": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C6, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.", "Desc": "Number of cores in C6", "EvSel": 128, "MaxIncCyc": 8, "SubCtr": 1, "Umask": "b11000000", }, "PCU.PROCHOT_EXTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.", "Desc": "External Prochot", "EvSel": 10, }, "PCU.PROCHOT_INTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in Interal PROCHOT mode. 
This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.", "Desc": "ExtSel Prochot", "EvSel": 9, }, "PCU.TOTAL_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions across all cores.", "Desc": "Total Core C State Transition Cycles", "ExtSel": 1, "EvSel": 11, }, "PCU.VOLT_TRANS_CYCLES_CHANGE": { "Box": "PCU", "Category": "PCU VOLT_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is changing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition. This event is calculated by or'ing together the increasing and decreasing events.", "Desc": "Cycles Changing Voltage", "EvSel": 3, }, "PCU.VOLT_TRANS_CYCLES_DECREASE": { "Box": "PCU", "Category": "PCU VOLT_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is decreasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.", "Desc": "Cycles Decreasing Voltage", "EvSel": 2, }, "PCU.VOLT_TRANS_CYCLES_INCREASE": { "Box": "PCU", "Category": "PCU VOLT_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is increasing voltage. There is no filtering supported with this event. 
One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.", "Desc": "Cycles Increasing Voltage", "EvSel": 1, }, "PCU.VR_HOT_CYCLES": { "Box": "PCU", "Category": "PCU VR_HOT Events", "Counters": "0-3", "Desc": "VR Hot", "EvSel": 50, }, # QPI_LL: "QPI_LL.CLOCKTICKS": { "Box": "QPI_LL", "Category": "QPI_LL CFCLK Events", "Counters": "0-3", "Defn": "Counts the number of clocks in the QPI LL. This clock runs at 1/8th the \"GT/s\" speed of the QPI link. For example, a 8GT/s link will have qfclk or 1GHz. JKT does not support dynamic link speeds, so this frequency is fixed.", "Desc": "Number of qfclks", "EvSel": 20, }, "QPI_LL.CTO_COUNT": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Counters": "0-3", "Defn": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.", "Desc": "Count of CTO Events", "ExtSel": 1, "EvSel": 56, "MaxIncCyc": 2, "SubCtr": 1, }, "QPI_LL.DIRECT2CORE": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, }, "QPI_LL.DIRECT2CORE.FAILURE_RBT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. 
Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "Umask": "bxxxxx1xx", }, "QPI_LL.DIRECT2CORE.SUCCESS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "Umask": "bxxxxxxx1", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_RBT": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "Umask": "bxxxx1xxx", }, "QPI_LL.DIRECT2CORE.FAILURE_CREDITS": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. 
The only candidates for Direct2Core are DRS packets destined for Cbos.", "Desc": "Direct 2 Core Spawning", "EvSel": 19, "Umask": "bxxxxxx1x", }, "QPI_LL.L1_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.", "Desc": "Cycles in L1", "EvSel": 18, }, "QPI_LL.RxL0P_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p", "EvSel": 16, "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "QPI_LL.RxL0_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. 
The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0", "EvSel": 15, }, "QPI_LL.RxL_BYPASSED": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "Rx Flit Buffer Bypassed", "EvSel": 9, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "ExtSel": 1, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NCS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "Umask": "bxxxxx1xx", "ExtSel": 1, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NCB": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "Umask": "bxxxxxx1x", "ExtSel": 1, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.SNP": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "Umask": "bxxx1xxxx", "ExtSel": 1, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.HOM": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "Umask": "bxxxx1xxx", "ExtSel": 1, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.DRS": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "Umask": "bxxxxxxx1", "ExtSel": 1, }, "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NDR": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). 
This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 30, "Umask": "bxx1xxxxx", "ExtSel": 1, }, "QPI_LL.RxL_CREDITS_CONSUMED_VNA": { "Box": "QPI_LL", "Category": "QPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VNA Credit Consumed", "EvSel": 29, "ExtSel": 1, }, "QPI_LL.RxL_CYCLES_NE": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.", "Desc": "RxQ Cycles Not Empty", "EvSel": 10, }, "QPI_LL.RxL_FLITS_G0": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. 
Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Received - Group 0", "EvSel": 1, "MaxIncCyc": 2, }, "QPI_LL.RxL_FLITS_G0.NON_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Received - Group 0", "EvSel": 1, "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "QPI_LL.RxL_FLITS_G0.DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. 
Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Received - Group 0", "EvSel": 1, "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "QPI_LL.RxL_FLITS_G0.IDLE": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Received - Group 0", "EvSel": 1, "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "QPI_LL.RxL_FLITS_G1": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "MaxIncCyc": 2, }, "QPI_LL.RxL_FLITS_G1.DRS_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "MaxIncCyc": 2, "Umask": "bxxxx1xxx", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G1.HOM_NONREQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "MaxIncCyc": 2, "Umask": "bxxxxx1xx", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G1.HOM_REQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "MaxIncCyc": 2, "Umask": "bxxxxxx1x", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "MaxIncCyc": 2, "Umask": "b00011000", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "MaxIncCyc": 2, "Umask": "b00000110", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "MaxIncCyc": 2, "Umask": "bxxxxxxx1", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G1.DRS_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 1", "EvSel": 2, "MaxIncCyc": 2, "Umask": "bxxx1xxxx", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G2": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "MaxIncCyc": 2, "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G2.NCS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. 
Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "MaxIncCyc": 2, "Umask": "bxxx1xxxx", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G2.NCB": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "MaxIncCyc": 2, "Umask": "b00001100", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G2.NDR_AD": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. 
This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "MaxIncCyc": 2, "Umask": "bxxxxxxx1", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G2.NCB_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "MaxIncCyc": 2, "Umask": "bxxxx1xxx", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G2.NDR_AK": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "MaxIncCyc": 2, "Umask": "bxxxxxx1x", "ExtSel": 1, }, "QPI_LL.RxL_FLITS_G2.NCB_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Counters": "0-3", "Defn": "Counts the number of flits received from the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Received - Group 2", "EvSel": 3, "MaxIncCyc": 2, "Umask": "bxxxxx1xx", "ExtSel": 1, }, "QPI_LL.RxL_INSERTS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. 
This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "Rx Flit Buffer Allocations", "EvSel": 8, }, "QPI_LL.RxL_INSERTS_DRS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.", "Desc": "Rx Flit Buffer Allocations - DRS", "EvSel": 9, "ExtSel": 1, }, "QPI_LL.RxL_INSERTS_HOM": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.", "Desc": "Rx Flit Buffer Allocations - HOM", "EvSel": 12, "ExtSel": 1, }, "QPI_LL.RxL_INSERTS_NCB": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. 
This monitors only NCB flits.", "Desc": "Rx Flit Buffer Allocations - NCB", "EvSel": 10, "ExtSel": 1, }, "QPI_LL.RxL_INSERTS_NCS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.", "Desc": "Rx Flit Buffer Allocations - NCS", "EvSel": 11, "ExtSel": 1, }, "QPI_LL.RxL_INSERTS_NDR": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.", "Desc": "Rx Flit Buffer Allocations - NDR", "EvSel": 14, "ExtSel": 1, }, "QPI_LL.RxL_INSERTS_SNP": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. 
This monitors only SNP flits.", "Desc": "Rx Flit Buffer Allocations - SNP", "EvSel": 13, "ExtSel": 1, }, "QPI_LL.RxL_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 11, "MaxIncCyc": 128, "SubCtr": 1, }, "QPI_LL.RxL_OCCUPANCY_DRS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.", "Desc": "RxQ Occupancy - DRS", "EvSel": 21, "MaxIncCyc": 128, "SubCtr": 1, "ExtSel": 1, }, "QPI_LL.RxL_OCCUPANCY_HOM": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. 
This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.", "Desc": "RxQ Occupancy - HOM", "EvSel": 24, "MaxIncCyc": 128, "SubCtr": 1, "ExtSel": 1, }, "QPI_LL.RxL_OCCUPANCY_NCB": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.", "Desc": "RxQ Occupancy - NCB", "EvSel": 22, "MaxIncCyc": 128, "SubCtr": 1, "ExtSel": 1, }, "QPI_LL.RxL_OCCUPANCY_NCS": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.", "Desc": "RxQ Occupancy - NCS", "EvSel": 23, "MaxIncCyc": 128, "SubCtr": 1, "ExtSel": 1, }, "QPI_LL.RxL_OCCUPANCY_NDR": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. 
Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.", "Desc": "RxQ Occupancy - NDR", "EvSel": 26, "MaxIncCyc": 128, "SubCtr": 1, "ExtSel": 1, }, "QPI_LL.RxL_OCCUPANCY_SNP": { "Box": "QPI_LL", "Category": "QPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.", "Desc": "RxQ Occupancy - SNP", "EvSel": 25, "MaxIncCyc": 128, "SubCtr": 1, "ExtSel": 1, }, "QPI_LL.TxL0P_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. 
Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p", "EvSel": 13, "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "QPI_LL.TxL0_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0", "EvSel": 12, }, "QPI_LL.TxL_BYPASSED": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.", "Desc": "Tx Flit Buffer Bypassed", "EvSel": 5, }, "QPI_LL.TxL_CYCLES_NE": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.", "Desc": "Tx Flit Buffer Cycles not Empty", "EvSel": 6, }, "QPI_LL.TxL_FLITS_G0": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. 
It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "MaxIncCyc": 2, }, "QPI_LL.TxL_FLITS_G0.NON_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "MaxIncCyc": 2, "Umask": "bxxxxx1xx", }, "QPI_LL.TxL_FLITS_G0.DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "MaxIncCyc": 2, "Umask": "bxxxxxx1x", }, "QPI_LL.TxL_FLITS_G0.IDLE": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. 
Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.", "Desc": "Flits Transferred - Group 0", "EvSel": 0, "MaxIncCyc": 2, "Umask": "bxxxxxxx1", }, "QPI_LL.TxL_FLITS_G1": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. 
Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "MaxIncCyc": 2, "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G1.DRS_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "MaxIncCyc": 2, "Umask": "bxxxx1xxx", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G1.HOM_NONREQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. 
This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "MaxIncCyc": 2, "Umask": "bxxxxx1xx", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G1.HOM_REQ": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "MaxIncCyc": 2, "Umask": "bxxxxxx1x", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G1.DRS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "MaxIncCyc": 2, "Umask": "b00011000", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G1.HOM": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "MaxIncCyc": 2, "Umask": "b00000110", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G1.SNP": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "MaxIncCyc": 2, "Umask": "bxxxxxxx1", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G1.DRS_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 1", "EvSel": 0, "MaxIncCyc": 2, "Umask": "bxxx1xxxx", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G2": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "MaxIncCyc": 2, "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G2.NCS": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. 
It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "MaxIncCyc": 2, "Umask": "bxxx1xxxx", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G2.NCB": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. 
One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "MaxIncCyc": 2, "Umask": "b00001100", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G2.NDR_AD": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. 
To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "MaxIncCyc": 2, "Umask": "bxxxxxxx1", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G2.NCB_NONDATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "MaxIncCyc": 2, "Umask": "bxxxx1xxx", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G2.NDR_AK": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). 
In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "MaxIncCyc": 2, "Umask": "bxxxxxx1x", "ExtSel": 1, }, "QPI_LL.TxL_FLITS_G2.NCB_DATA": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_TX Events", "Counters": "0-3", "Defn": "Counts the number of flits trasmitted across the QPI Link. This is one of three \"groups\" that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each \"flit\" is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\". Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as \"data\" bandwidth. 
For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information. To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.", "Desc": "Flits Transferred - Group 2", "EvSel": 1, "MaxIncCyc": 2, "Umask": "bxxxxx1xx", "ExtSel": 1, }, "QPI_LL.TxL_INSERTS": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "Tx Flit Buffer Allocations", "EvSel": 4, }, "QPI_LL.TxL_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL TXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. 
This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.", "Desc": "Tx Flit Buffer Occupancy", "EvSel": 7, }, "QPI_LL.VNA_CREDIT_RETURNS": { "Box": "QPI_LL", "Category": "QPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "Defn": "Number of VNA credits returned.", "Desc": "VNA Credits Returned", "EvSel": 28, "ExtSel": 1, }, "QPI_LL.VNA_CREDIT_RETURN_OCCUPANCY": { "Box": "QPI_LL", "Category": "QPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "Defn": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.", "Desc": "VNA Credits Pending Return - Occupancy", "EvSel": 27, "MaxIncCyc": 128, "SubCtr": 1, "ExtSel": 1, }, # UBOX: "UBOX.EVENT_MSG": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, }, "UBOX.EVENT_MSG.DOORBELL_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "Umask": "bxxxx1xxx", }, "UBOX.EVENT_MSG.IPI_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "Umask": "bxxxxx1xx", }, "UBOX.EVENT_MSG.INT_PRIO": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. 
Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "Umask": "bxxx1xxxx", }, "UBOX.EVENT_MSG.VLW_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "Umask": "bxxxxxxx1", }, "UBOX.EVENT_MSG.MSI_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "VLW Received", "EvSel": 66, "Umask": "bxxxxxx1x", }, "UBOX.FILTER_MATCH": { "Box": "UBOX", "Category": "UBOX FILTER_MATCH Events", "Counters": "0-1", "Defn": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "Filter Match", "EvSel": 65, }, "UBOX.FILTER_MATCH.U2C_ENABLE": { "Box": "UBOX", "Category": "UBOX FILTER_MATCH Events", "Counters": "0-1", "Defn": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "Filter Match", "EvSel": 65, "Umask": "bxxxxx1xx", }, "UBOX.FILTER_MATCH.U2C_DISABLE": { "Box": "UBOX", "Category": "UBOX FILTER_MATCH Events", "Counters": "0-1", "Defn": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "Filter Match", "EvSel": 65, "Umask": "bxxxx1xxx", }, "UBOX.FILTER_MATCH.DISABLE": { "Box": "UBOX", "Category": "UBOX FILTER_MATCH Events", "Counters": "0-1", "Defn": "Filter match per thread (w/ or w/o Filter Enable). 
Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "Filter Match", "EvSel": 65, "Umask": "bxxxxxx1x", }, "UBOX.FILTER_MATCH.ENABLE": { "Box": "UBOX", "Category": "UBOX FILTER_MATCH Events", "Counters": "0-1", "Defn": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.", "Desc": "Filter Match", "EvSel": 65, "Umask": "bxxxxxxx1", }, "UBOX.LOCK_CYCLES": { "Box": "UBOX", "Category": "UBOX LOCK Events", "Counters": "0-1", "Defn": "Number of times an IDI Lock/SplitLock sequence was started", "Desc": "IDI Lock/SplitLock Cycles", "EvSel": 68, }, } derived = { # HA: "HA.PCT_CYCLES_BL_FULL": { "Box": "HA", "Category": "HA BL_EGRESS Events", "Defn": "Percentage of time the BL Egress Queue is full", "Desc": "Percent BL Egress Full", "Equation": "TxR_BL_CYCLES_FULL.ALL / SAMPLE_INTERVAL", "Obscure": 1, }, "HA.PCT_CYCLES_CONFLICT": { "Box": "HA", "Category": "HA CONFLICTS Events", "Defn": "Percentage of time in Conflict Resolution", "Desc": "Percent Conflict", "Equation": "CONFLICT_CYCLES.CONFLICT / SAMPLE_INTERVAL", "Broken": 1, }, "HA.PCT_CYCLES_D2C_DISABLED": { "Box": "HA", "Category": "HA DIRECT2CORE Events", "Defn": "Percentage of time that Direct2Core was disabled.", "Desc": "Percent D2C Disabled", "Equation": "DIRECT2CORE_CYCLES_DISABLED / SAMPLE_INTERVAL", "Obscure": 1, }, "HA.PCT_RD_REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Defn": "Percentage of HA traffic that is from Read Requests", "Desc": "Percent Read Requests", "Equation": "REQUESTS.READS / (REQUESTS.READS + REQUESTS.WRITES)", }, "HA.PCT_WR_REQUESTS": { "Box": "HA", "Category": "HA REQUESTS Events", "Defn": "Percentage of HA traffic that is from Write Requests", "Desc": "Percent Write Requests", "Equation": "REQUESTS.WRITES / (REQUESTS.READS + REQUESTS.WRITES)", }, # iMC: "iMC.MEM_BW_READS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by reads. 
Expressed in bytes.", "Desc": "Read Memory Bandwidth", "Equation": "(CAS_COUNT.RD * 64)", }, "iMC.MEM_BW_TOTAL": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Total memory bandwidth. Expressed in bytes.", "Desc": "Total Memory Bandwidth", "Equation": "MEM_BW_READS + MEM_BW_WRITES", }, "iMC.MEM_BW_WRITES": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Memory bandwidth consumed by writes Expressed in bytes.", "Desc": "Write Memory Bandwidth", "Equation": "(CAS_COUNT.WR * 64)", }, "iMC.PCT_CYCLES_CRITICAL_THROTTLE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in critical thermal throttling", "Desc": "Percent Cycles Critical Throttle", "Equation": "POWER_CRITICAL_THROTTLE_CYCLES / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in CKE slow (DLOFF) mode", "Desc": "Percent Cycles DLOFF", "Equation": "POWER_CHANNEL_DLLOFF / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_CKE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in CKE ON mode.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_CKE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_THR": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in thermal throttling.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_THROTTLE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in PPD mode", "Desc": "Percent Cycles PPD", "Equation": "POWER_CHANNEL_PPD / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles Memory is in self refresh power mode", "Desc": "Percent Cycles Self 
Refresh", "Equation": "POWER_SELF_REFRESH / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_RD_REQUESTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Defn": "Percentage of read requests from total requests.", "Desc": "Percent Read Requests", "Equation": "RPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)", }, "iMC.PCT_REQUESTS_PAGE_EMPTY": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Empty", "Desc": "Percent Requests Page Empty", "Equation": "(ACT_COUNT - PRE_COUNT.PAGE_MISS)/ (CAS_COUNT.RD + CAS_COUNT.WR)", }, "iMC.PCT_REQUESTS_PAGE_HIT": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Hits", "Desc": "Percent Requests Page Hit", "Equation": "1 - (PCT_REQUESTS_PAGE_EMPTY + PCT_REQUESTS_PAGE_MISS)", }, "iMC.PCT_REQUESTS_PAGE_MISS": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "Percentage of memory requests that resulted in Page Misses", "Desc": "Percent Requests Page Miss", "Equation": "PRE_COUNT.PAGE_MISS / (CAS_COUNT.RD + CAS_COUNT.WR)", }, "iMC.PCT_WR_REQUESTS": { "Box": "iMC", "Category": "iMC WPQ Events", "Defn": "Percentage of write requests from total requests.", "Desc": "Percent Write Requests", "Equation": "WPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)", }, # R2PCIe: "R2PCIe.CYC_USED_DNEVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used Down and Even", "Equation": "RING_BL_USED.CCW_EVEN / SAMPLE_INTERVAL", "Obscure": 1, }, "R2PCIe.CYC_USED_DNODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Down direction, Odd polarity", "Desc": "Cycles Used Down and Odd", "Equation": "RING_BL_USED.CCW_ODD / SAMPLE_INTERVAL", "Obscure": 1, }, "R2PCIe.CYC_USED_UPEVEN": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": 
"RING_BL_USED.CW_EVEN / SAMPLE_INTERVAL", "Obscure": 1, }, "R2PCIe.CYC_USED_UPODD": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Cycles Used in the Up direction, Odd polarity", "Desc": "Cycles Used Up and Odd", "Equation": "RING_BL_USED.CW_ODD / SAMPLE_INTERVAL", "Obscure": 1, }, "R2PCIe.RING_THRU_DNEVEN_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", "Desc": "Ring Throughput Down and Even", "Equation": "RING_BL_USED.CCW_EVEN * 32", "Obscure": 1, }, "R2PCIe.RING_THRU_DNODD_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Down direction, Odd polarity in Bytes", "Desc": "Ring Throughput Down and Odd", "Equation": "RING_BL_USED.CCW_ODD * 32", "Obscure": 1, }, "R2PCIe.RING_THRU_UPEVEN_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": "Ring Throughput Up and Even", "Equation": "RING_BL_USED.CW_EVEN * 32", "Obscure": 1, }, "R2PCIe.RING_THRU_UPODD_BYTES": { "Box": "R2PCIe", "Category": "R2PCIe RING Events", "Defn": "Ring throughput in the Up direction, Odd polarity in Bytes", "Desc": "Ring Throughput Up and Odd", "Equation": "RING_BL_USED.CW_ODD * 32", "Obscure": 1, }, # QPI_LL: "QPI_LL.DATA_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "Data received from QPI in bytes ( = DRS + NCB Data messages received from QPI)", "Desc": "Data From QPI", "Equation": "DRS_DATA_MSGS_FROM_QPI + NCB_DATA_MSGS_FROM_QPI", }, "QPI_LL.DATA_FROM_QPI_TO_HA_OR_IIO": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Defn": "Data received from QPI forwarded to HA or IIO. 
Expressed in Bytes", "Desc": "Data From QPI To HA or IIO", "Equation": "DATA_FROM_QPI - DATA_FROM_QPI_TO_LLC", "Broken": 1, }, "QPI_LL.DATA_FROM_QPI_TO_LLC": { "Box": "QPI_LL", "Category": "QPI_LL DIRECT2CORE Events", "Defn": "Data received from QPI forwarded to LLC. Expressed in Bytes", "Desc": "Data From QPI To LLC", "Equation": "DIRECT2CORE.SUCCESS * 64", "Broken": 1, }, "QPI_LL.DATA_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "Data packets received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "Data From QPI To Node x", "Equation": "DRS_DataC_FROM_QPI_TO_NODEx + DRS_WRITE_FROM_QPI_TO_NODEx + NCB_DATA_FROM_QPI_TO_NODEx", }, "QPI_LL.DRS_DATA_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Defn": "DRS Data Messages From QPI in bytes", "Desc": "DRS Data Messages From QPI", "Equation": "(RxL_FLITS_G1.DRS_DATA * 8)", "Obscure": 1, }, "QPI_LL.DRS_DataC_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS DataC packets received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "DRS DataC From QPI To Node x", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1C00,x},Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FF80}) * 64", "Filter": "QPIMask0[17:0],QPIMatch0[17:0]", "Obscure": 1, }, "QPI_LL.DRS_FULL_CACHELINE_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Full Cacheline Data Messages From QPI in bytes", "Desc": "DRS Full Cacheline Data Messages From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00,Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1F00}) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0]", "Obscure": 1, }, "QPI_LL.DRS_F_OR_E_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS response in F or E states received from QPI in bytes. 
To calculate the total data response for each cache line state, it's necessary to add the contribution from three flavors {DataC, DataC_FrcAckCnflt, DataC_Cmp} of data response packets for each cache line state.", "Desc": "DRS Data in F or E From QPI", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C40, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C40, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C20, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C20, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF })) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0],QPIMask1[19:16],QPIMatch1[19:16]", "Obscure": 1, }, "QPI_LL.DRS_M_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS response in M state received from QPI in bytes", "Desc": "DRS Data in M From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x8, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0],QPIMask1[19:16],QPIMatch1[19:16]", "Obscure": 1, }, "QPI_LL.DRS_PTL_CACHELINE_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Partial Cacheline Data Messages From QPI in 
bytes", "Desc": "DRS Partial Cacheline Data Messages From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1D00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1F00}) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0]", "Obscure": 1, }, "QPI_LL.DRS_WB_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback packets received from QPI in bytes. This is the sum of Wb{I,S,E} DRS packets", "Desc": "DRS Writeback From QPI", "Equation": "DRS_WbI_FROM_QPI + DRS_WbS_FROM_QPI + DRS_WbE_FROM_QPI", "Obscure": 1, }, "QPI_LL.DRS_WRITE_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS Data packets (Any - DataC) received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "DRS Data From QPI To Node x", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1C00,x},Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FE00} - CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1C00,x},Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FF80}) * 64", "Filter": "QPIMask0[17:0],QPIMatch0[17:0]", "Obscure": 1, }, "QPI_LL.DRS_WbE_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to E state' packets received from QPI in bytes", "Desc": "DRS WbE From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1CC0, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0]", "Obscure": 1, }, "QPI_LL.DRS_WbI_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to I state' packets received from QPI in bytes", "Desc": "DRS WbI From QPI", "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C80, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0]", "Obscure": 1, }, "QPI_LL.DRS_WbS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "DRS writeback 'change to S state' packets received from QPI in bytes", "Desc": "DRS WbSFrom QPI", "Equation": "(CTO_COUNT 
with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1CA0, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0}) * 64", "Filter": "QPIMask0[12:0],QPIMatch0[12:0]", "Obscure": 1, }, "QPI_LL.NCB_DATA_FROM_QPI_TO_NODEx": { "Box": "QPI_LL", "Category": "QPI_LL CTO Events", "Defn": "NCB Data packets (Any - Interrupts) received from QPI sent to Node ID 'x'. Expressed in bytes", "Desc": "NCB Data From QPI To Node x", "Equation": "((CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1800,x},Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FE00}) - (CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1900,x},Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FF80})) * 64", "Filter": "QPIMask0[17:0],QPIMatch0[17:0]", "Obscure": 1, }, "QPI_LL.NCB_DATA_MSGS_FROM_QPI": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Defn": "NCB Data Messages From QPI in bytes", "Desc": "NCB Data Messages From QPI", "Equation": "(RxL_FLITS_G2.NCB_DATA * 8)", "Obscure": 1, }, "QPI_LL.PCT_LINK_FULL_POWER_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Defn": "Percent of Cycles the QPI link is at Full Power", "Desc": "Percent Link Full Power Cycles", "Equation": "RxL0_POWER_CYCLES / CLOCKTICKS", "Obscure": 1, }, "QPI_LL.PCT_LINK_HALF_DISABLED_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER_RX Events", "Defn": "Percent of Cycles the QPI link in power mode where half of the lanes are disabled.", "Desc": "Percent Link Half Disabled Cycles", "Equation": "RxL0P_POWER_CYCLES / CLOCKTICKS", "Obscure": 1, }, "QPI_LL.PCT_LINK_SHUTDOWN_CYCLES": { "Box": "QPI_LL", "Category": "QPI_LL POWER Events", "Defn": "Percent of Cycles the QPI link is Shutdown", "Desc": "Percent Link Shutdown Cycles", "Equation": "L1_POWER_CYCLES / CLOCKTICKS", "Obscure": 1, }, "QPI_LL.QPI_LINK_UTIL": { "Box": "QPI_LL", "Category": "QPI_LL FLITS_RX Events", "Defn": "Percentage of cycles that QPI Link was utilized. 
Calculated from 1 - Number of idle flits - time the link was 'off'", "Desc": "QPI Link Utilization", "Equation": "(RxL_FLITS_G0.DATA + RxL_FLITS_G0.NON_DATA) / (2 * CLOCKTICKS)", }, # PCU: "PCU.PCT_FREQ_BAND0": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", "Equation": "FREQ_BAND0_CYCLES / CLOCKTICKS" }, "PCU.PCT_FREQ_BAND1": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. 
One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", "Equation": "FREQ_BAND1_CYCLES / CLOCKTICKS" }, "PCU.PCT_FREQ_BAND2": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. 
It is handled on fast path.", "Equation": "FREQ_BAND2_CYCLES / CLOCKTICKS" }, "PCU.PCT_FREQ_BAND3": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. 
It is handled on fast path.", "Equation": "FREQ_BAND3_CYCLES / CLOCKTICKS" }, "PCU.PCT_FREQ_CURRENT_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percent of Cycles the Max Frequency is limited by current", "Desc": "Percent of Cycles Frequency Current Limited", "Equation": "FREQ_MAX_CURRENT_CYCLES / CLOCKTICKS", }, "PCU.PCT_FREQ_OS_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percent of Cycles the Max Frequency is limited by the OS", "Desc": "Percent of Cycles Frequency OS Limited", "Equation": "FREQ_MAX_OS_CYCLES / CLOCKTICKS", }, "PCU.PCT_FREQ_POWER_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percent of Cycles the Max Frequency is limited by power", "Desc": "Percent of Cycles Frequency Power Limited", "Equation": "FREQ_MAX_POWER_CYCLES / CLOCKTICKS", }, "PCU.PCT_FREQ_THERMAL_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percent of Cycles the Max Frequency is limited by thermal issues", "Desc": "Percent of Cycles Frequency Thermal Limited", "Equation": "FREQ_MAX_CURRENT_CYCLES / CLOCKTICKS", }, # CBO: "CBO.AVG_INGRESS_DEPTH": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Depth of the Ingress Queue through the sample interval", "Desc": "Average Ingress Depth", "Equation": "RxR_OCCUPANCY.IRQ / SAMPLE_INTERVAL", "Obscure": 1, }, "CBO.AVG_INGRESS_LATENCY": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks", "Desc": "Average Ingress Latency", "Equation": "RxR_OCCUPANCY.IRQ / RxR_INSERTS.IRQ", "Obscure": 1, }, "CBO.AVG_INGRESS_LATENCY_WHEN_NE": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks when Ingress Queue has at least one entry", "Desc": "Average Latency in Non-Empty Ingress", "Equation": "RxR_OCCUPANCY.IRQ / COUNTER0_OCCUPANCY with:{edge_det=1,thresh=0x1}", "Obscure": 1, }, 
"CBO.AVG_TOR_DRDS_MISS_WHEN_NE": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Number of Data Read Entries that Miss the LLC when the TOR is not empty.", "Desc": "Average Data Read Misses in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / COUNTER0_OCCUPANCY with:{edge_det=1,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182", "Filter": "CBoFilter[31:23]", "Obscure": 1, }, "CBO.AVG_TOR_DRDS_WHEN_NE": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Number of Data Read Entries when the TOR is not empty.", "Desc": "Average Data Reads in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.OPCODE / COUNTER0_OCCUPANCY with:{edge_det=1,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182", "Filter": "CBoFilter[31:23]", "Obscure": 1, }, "CBO.AVG_TOR_DRD_HIT_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that hit the LLC", "Desc": "Data Read Hit Latency through TOR", "Equation": "((TOR_OCCUPANCY.OPCODE - TOR_OCCUPANCY.MISS_OPCODE) / (TOR_INSERTS.OPCODE - TOR_INSERTS.MISS_OPCODE)) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182", "Filter": "CBoFilter[31:23]", "Obscure": 1, }, "CBO.AVG_TOR_DRD_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Read Entries making their way through the TOR", "Desc": "Data Read Latency through TOR", "Equation": "(TOR_OCCUPANCY.OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182", "Filter": "CBoFilter[31:23]", "Obscure": 1, }, "CBO.AVG_TOR_DRD_MISS_LATENCY": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Average Latency of Data Reads through the TOR that miss the LLC", "Desc": "Data Read Miss Latency through TOR", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182", "Filter": "CBoFilter[31:23]", "Obscure": 1, }, "CBO.CYC_INGRESS_BLOCKED": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Cycles the Ingress Request Queue arbiter was 
Blocked", "Desc": "Cycles Ingress Blocked", "Equation": "RxR_EXT_STARVED.IRQ / SAMPLE_INTERVAL", "Obscure": 1, }, "CBO.CYC_USED_DNEVEN": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Down direction, Even polarity", "Desc": "Cycles Used Down and Even", "Equation": "RING_BL_USED.DOWN_EVEN / SAMPLE_INTERVAL", "Obscure": 1 }, "CBO.CYC_USED_DNODD": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Down direction, Odd polarity", "Desc": "Cycles Used Down and Odd", "Equation": "RING_BL_USED.DOWN_ODD / SAMPLE_INTERVAL", "Obscure": 1 }, "CBO.CYC_USED_UPEVEN": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Up direction, Even polarity", "Desc": "Cycles Used Up and Even", "Equation": "RING_BL_USED.UP_EVEN / SAMPLE_INTERVAL", "Obscure": 1 }, "CBO.CYC_USED_UPODD": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Cycles Used in the Up direction, Odd polarity", "Desc": "Cycles Used Up and Odd", "Equation": "RING_BL_USED.UP_ODD / SAMPLE_INTERVAL", "Obscure": 1 }, "CBO.INGRESS_REJ_V_INS": { "Box": "CBO", "Category": "CBO INGRESS Events", "Defn": "Ratio of Ingress Request Entries that were rejected vs. inserted", "Desc": "Ingress Rejects vs. 
Inserts", "Equation": "RxR_INSERTS.IRQ_REJECTED / RxR_INSERTS.IRQ", "Obscure": 1 }, "CBO.LLC_DRD_MISS_PCT": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "LLC Data Read miss ratio", "Desc": "LLC DRD Miss Ratio", "Equation": "LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER.state=0x1 / LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER.state=0x1F", "Filter": "CBoFilter[22:18]", "Obscure": 1, # too much multiplexing error }, "CBO.LLC_PCIE_DATA_BYTES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC Miss Data from PCIe in Number of Bytes", "Desc": "LLC Miss Data from PCIe", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19C * 64", "Filter": "CBoFilter[31:23]", "Broken": 1, }, "CBO.LLC_RFO_MISS_PCT": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC RFO Miss Ratio", "Desc": "LLC RFO Miss Ratio", "Equation": "(TOR_INSERTS.MISS_OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER.opc=0x180", "Filter": "CBoFilter[31:23]", }, "CBO.MEM_WB_BYTES": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "Data written back to memory in Number of Bytes", "Desc": "Memory Writebacks", "Equation": "LLC_VICTIMS.M_STATE * 64", }, "CBO.PCIE_DATA_BYTES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "Data from PCIe in Number of Bytes", "Desc": "PCIe Data Traffic", "Equation": "(TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x194 + TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19c) * 64", "Filter": "CBoFilter[31:23]", "Broken": 1, }, "CBO.RING_THRU_DNEVEN_BYTES": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Ring throughput in the Down direction, Even polarity in Bytes", "Desc": "Ring Throughput Down and Even", "Equation": "RING_BL_USED.DOWN_EVEN * 32", "Obscure": 1, }, "CBO.RING_THRU_DNODD_BYTES": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Ring throughput in the Down direction, Odd polarity in Bytes", "Desc": "Ring Throughput Down and Odd", "Equation": "RING_BL_USED.DOWN_ODD * 32", "Obscure": 
1, }, "CBO.RING_THRU_UPEVEN_BYTES": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Ring throughput in the Up direction, Even polarity in Bytes", "Desc": "Ring Throughput Up and Even", "Equation": "RING_BL_USED.UP_EVEN * 32", "Obscure": 1, }, "CBO.RING_THRU_UPODD_BYTES": { "Box": "CBO", "Category": "CBO RING Events", "Defn": "Ring throughput in the Up direction, Odd polarity in Bytes", "Desc": "Ring Throughput Up and Odd", "Equation": "RING_BL_USED.UP_ODD * 32", "Obscure": 1, }, } categories = ( "CBO CACHE Events", "CBO EGRESS Events", "CBO INGRESS Events", "CBO INGRESS_RETRY Events", "CBO ISMQ Events", "CBO MISC Events", "CBO OCCUPANCY Events", "CBO RING Events", "CBO TOR Events", "CBO UCLK Events", "HA ADDR_OPCODE_MATCH Events", "HA AD_EGRESS Events", "HA AK_EGRESS Events", "HA BL_EGRESS Events", "HA CONFLICTS Events", "HA DIRECT2CORE Events", "HA DIRECTORY Events", "HA IMC_MISC Events", "HA IMC_WRITES Events", "HA OUTBOUND_TX Events", "HA QPI_IGR_CREDITS Events", "HA REQUESTS Events", "HA RPQ_CREDITS Events", "HA TAD Events", "HA TRACKER Events", "HA UCLK Events", "HA WPQ_CREDITS Events", "PCU CORE_C_STATE_TRANSITION Events", "PCU FREQ_MAX_LIMIT Events", "PCU FREQ_MIN_LIMIT Events", "PCU FREQ_RESIDENCY Events", "PCU FREQ_TRANS Events", "PCU MEMORY_PHASE_SHEDDING Events", "PCU PCLK Events", "PCU POWER_STATE_OCC Events", "PCU PROCHOT Events", "PCU VOLT_TRANS Events", "PCU VR_HOT Events", "QPI_LL CFCLK Events", "QPI_LL CRC_ERRORS_RX Events", "QPI_LL CTO Events", "QPI_LL DIRECT2CORE Events", "QPI_LL FLITS_RX Events", "QPI_LL FLITS_TX Events", "QPI_LL POWER Events", "QPI_LL POWER_RX Events", "QPI_LL POWER_TX Events", "QPI_LL RXQ Events", "QPI_LL RX_CREDITS_CONSUMED Events", "QPI_LL TXQ Events", "QPI_LL VNA_CREDIT_RETURN Events", "R2PCIe EGRESS Events", "R2PCIe INGRESS Events", "R2PCIe RING Events", "R2PCIe UCLK Events", "R3QPI EGRESS Events", "R3QPI IIO_CREDITS Events", "R3QPI INGRESS Events", "R3QPI LINK_VN0_CREDITS Events", "R3QPI LINK_VNA_CREDITS Events", 
"R3QPI RING Events", "R3QPI UCLK Events", "UBOX EVENT_MSG Events", "UBOX FILTER_MATCH Events", "UBOX LOCK Events", "iMC ACT Events", "iMC CAS Events", "iMC DRAM_PRE_ALL Events", "iMC DRAM_REFRESH Events", "iMC ECC Events", "iMC MAJOR_MODES Events", "iMC POWER Events", "iMC PRE Events", "iMC PREEMPTION Events", "iMC RPQ Events", "iMC WPQ Events", );
350,803
Python
.py
5,162
58.176095
1,362
0.634063
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,962
uctopy.pl
andikleen_pmu-tools/ucevent/uctopy.pl
#!/usr/bin/perl
# Generate python uncore event data files from perl input definitions.
# Usage: uctopy.pl CPU-ACRONYM events.pl derived.pl > cpu_uc.py
use File::Basename;

$cpu = $ARGV[0];
shift(@ARGV);

# Load the perl definition files; each is expected to populate
# %<CPU>_UCFilterAliases, %<CPU>_UCEventList and %<CPU>_UCDerivedList.
foreach (@ARGV) {
    do $_;
}

# Bind the CPU-specific package hashes to generic names via string eval.
$code = <<END;
\$aliases = \\%CPU_UCFilterAliases;
\$events = \\%CPU_UCEventList;
\$derived = \\%CPU_UCDerivedList;
END
$code =~ s/CPU/$cpu/g;
eval($code);

# Header comment listing the CPU and the input file basenames.
print "# $cpu ";
foreach $j (@ARGV) {
    $f = basename($j);
    $f =~ s/\.pl//;
    print "$f ";
}
print "\n\n";

%categories = ();  # fixed: was '= {}' (hashref in list context -> odd-element hash assignment)
@catlist = ();     # fixed: was '@catlit' (typo; print_event pushes onto @catlist)
%global = ();      # fixed: was '= {}' (same odd-element hash assignment)
$indent = " ";
$quote = "\"";
$nquote = "\"\"\"";

# Quote a string as a python literal; multi-line data gets triple quotes.
sub addquote($) {
    my($data) = (@_);
    return $nquote . $data . $nquote if ($data =~ /\n/);
    return $quote . $data . $quote;
}

print "# aliases\n";
print "aliases = {\n";
foreach $i (keys(%{ $aliases } )) {
    print $indent,$quote,$i,$quote,": ",addquote($aliases->{$i}),",\n";
}
print "}\n\n";

# Format a value as a python literal: bare numbers (decimal or hex)
# pass through unquoted, everything else is escaped and quoted.
sub format_data($) {
    my($data) = (@_);
    return $data if ($data =~ /^[0-9]+$/ || $data =~ /^0x[0-9a-fA-F]+$/);
    $data =~ s/"/\\"/g;
    return addquote($data);
}

# Expand a "A-B" counter range into an explicit list.
# NOTE(review): the early return deliberately disables the expansion, so
# ranges currently pass through unchanged; kept to preserve behavior.
sub to_list($) {
    my($l) = (@_);
    return $l;
    ($a, $b) = $l =~ /(\d+)-(\d+)/;
    $o = "";
    for (; $a <= $b; $a++) {
        $o .= "$a,";  # fixed: was '+=' (numeric add) in this disabled branch
    }
    return $o;
}

# Emit one event as a python dict entry, filtering internal-only fields
# and renaming "Internal" to "ExtSel".
sub print_event($$) {
    my($name, $ev) = (@_);
    #return if $ev->{'Public'} ne "Y";
    push(@catlist, $ev->{"Category"});
    print $indent,$quote,$name,$quote,": {\n";
    foreach $w (sort(keys(%{$ev}))) {
        next if $w =~ /Sub[cC]at/;
        next if $w eq "Subevents";
        next if $ev->{$w} eq "" && $w ne "Category";
        next if $w eq "OrigName";
        next if $w =~ /([A-Z]+)Status/;
        next if $w eq "RTLSignal";
        next if $w eq "Public";
        if ($w eq "Internal") {
            $w = "ExtSel";
        }
        $val = $ev->{$w};
        # drop fields whose value carries no information
        next if $w eq "MaxIncCyc" && ($val == "1" || $val == "0");
        next if $w eq "SubCtr" && $val == "0";
        $val = to_list($val) if $w eq "Counters" && $val =~ /-/;
        print $indent,$indent, addquote($w),": ",format_data($val),",\n";
    }
    print $indent,"},\n";
}

# Emit all sub-events of an event, inheriting any field the sub-event
# does not set from the parent ($ev is the global set by print_list).
sub print_sub($$$) {
    my($box, $j, $sub) = (@_);
    foreach $k (keys(%{$sub})) {
        $subev = $sub->{$k};
        # copy parent fields into the sub event to normalize it
        # NOTE(review): the defined() test looks at $sub rather than
        # $subev — confirm against the input data layout before changing.
        foreach $o (keys(%{$ev})) {
            next if defined($sub->{$o});
            $subev->{$o} = $ev->{$o};
        }
        print_event("$box.$j.$k", $subev);
    }
}

# Emit a whole event list (events or derived) as a python dict,
# grouped per box, with sub-event variants expanded.
sub print_list($$) {
    my($name, $evl) = (@_);
    print "$name = {\n";
    foreach $box (keys(%{$evl})) {
        $evlist = $evl->{$box};
        $box =~ s/ Box Events//;
        $box =~ s/ /_/g;
        print $indent,"\n# $box:\n";
        foreach $j (sort(keys(%{$evlist}))) {
            $ev = $evlist->{$j};
            $ev->{"Box"} = $box;
            $ev->{"Category"} = $box . " " . $ev->{"Category"};
            print_event("$box.$j", $ev);
            print_sub($box, $j, $ev->{"Subcat"});
            print_sub($box, $j, $ev->{"SubCat"});
            print_sub($box, $j, $ev->{"Subevents"});
        }
    }
    print "}\n";
}

print_list("events", $events);
print_list("derived", $derived);

# Emit the sorted, de-duplicated category tuple collected by print_event.
print "categories = (\n";
$prev = "";
foreach $i (sort @catlist) {
    next if $i eq $prev;
    $prev = $i;
    print $indent,addquote($i),",\n";
}
print ");\n";
2,989
Python
.py
124
21.862903
70
0.523558
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,963
skx_uc.py
andikleen_pmu-tools/ucevent/skx_uc.py
# SKX skx_uc_events.v0.50p.txt skx_uc_derived.v0.50p.txt # aliases aliases = { "CHAFilter1": "C{i}_MSR_PMON_BOX_FILTER1", "M2MAddrMask0": "M2M{i}_PCI_PMON_ADDRMASK0", "M2MAddrMask1": "M2M{i}_PCI_PMON_ADDRMASK1", "M2MOpcMask": "M2M{i}_PCI_PMON_OPCMASK", "M2MAddrMatch0": "M2M{i}_PCI_PMON_ADDRMATCH1", "PCUFilter": "PCU_MSR_PMON_BOX_FILTER", "UBoxFilter": "U_MSR_PMON_BOX_FILTER", "CHAFilter0": "C{i}_MSR_PMON_BOX_FILTER0", } events = { # M3UPI: "M3UPI.AG0_AD_CRD_ACQUIRED": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "M3UPI.AG0_AD_CRD_ACQUIRED.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "M3UPI.AG0_AD_CRD_ACQUIRED.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "M3UPI.AG0_AD_CRD_ACQUIRED.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "M3UPI.AG0_AD_CRD_ACQUIRED.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 
AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "M3UPI.AG0_AD_CRD_ACQUIRED.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "M3UPI.AG0_AD_CRD_ACQUIRED.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "M3UPI.AG0_AD_CRD_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "M3UPI.AG0_AD_CRD_OCCUPANCY.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxx1x", }, "M3UPI.AG0_AD_CRD_OCCUPANCY.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": 
"bxx1xxxxx", }, "M3UPI.AG0_AD_CRD_OCCUPANCY.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxx1xx", }, "M3UPI.AG0_AD_CRD_OCCUPANCY.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxxx1", }, "M3UPI.AG0_AD_CRD_OCCUPANCY.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxx1xxxx", }, "M3UPI.AG0_AD_CRD_OCCUPANCY.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxx1xxx", }, "M3UPI.AG0_BL_CRD_ACQUIRED": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", }, "M3UPI.AG0_BL_CRD_ACQUIRED.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": 
"bxxxxxx1x", }, "M3UPI.AG0_BL_CRD_ACQUIRED.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.AG0_BL_CRD_ACQUIRED.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.AG0_BL_CRD_ACQUIRED.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.AG0_BL_CRD_ACQUIRED.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.AG0_BL_CRD_ACQUIRED.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.AG0_BL_CRD_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", }, "M3UPI.AG0_BL_CRD_OCCUPANCY.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxx1xxxx", }, 
"M3UPI.AG0_BL_CRD_OCCUPANCY.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.AG0_BL_CRD_OCCUPANCY.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.AG0_BL_CRD_OCCUPANCY.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.AG0_BL_CRD_OCCUPANCY.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.AG0_BL_CRD_OCCUPANCY.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.AG1_AD_CRD_ACQUIRED": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "M3UPI.AG1_AD_CRD_ACQUIRED.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, 
"ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "M3UPI.AG1_AD_CRD_ACQUIRED.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "M3UPI.AG1_AD_CRD_ACQUIRED.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "M3UPI.AG1_AD_CRD_ACQUIRED.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "M3UPI.AG1_AD_CRD_ACQUIRED.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "M3UPI.AG1_AD_CRD_ACQUIRED.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "M3UPI.AG1_AD_CRD_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI CMS 
Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "M3UPI.AG1_AD_CRD_OCCUPANCY.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxx1x", }, "M3UPI.AG1_AD_CRD_OCCUPANCY.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxx1xxxxx", }, "M3UPI.AG1_AD_CRD_OCCUPANCY.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxx1xx", }, "M3UPI.AG1_AD_CRD_OCCUPANCY.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxxx1", }, "M3UPI.AG1_AD_CRD_OCCUPANCY.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks 
are selected, will count the SUM of all selected", "Umask": "bxxx1xxxx", }, "M3UPI.AG1_AD_CRD_OCCUPANCY.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxx1xxx", }, "M3UPI.AG1_BL_CRD_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", }, "M3UPI.AG1_BL_CRD_OCCUPANCY.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.AG1_BL_CRD_OCCUPANCY.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.AG1_BL_CRD_OCCUPANCY.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.AG1_BL_CRD_OCCUPANCY.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.AG1_BL_CRD_OCCUPANCY.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per 
transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.AG1_BL_CRD_OCCUPANCY.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.AG1_BL_CREDITS_ACQUIRED": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", }, "M3UPI.AG1_BL_CREDITS_ACQUIRED.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.AG1_BL_CREDITS_ACQUIRED.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.AG1_BL_CREDITS_ACQUIRED.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.AG1_BL_CREDITS_ACQUIRED.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.AG1_BL_CREDITS_ACQUIRED.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", 
"Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.AG1_BL_CREDITS_ACQUIRED.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.CHA_AD_CREDITS_EMPTY": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", }, "M3UPI.CHA_AD_CREDITS_EMPTY.VNA": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.CHA_AD_CREDITS_EMPTY.WB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.CHA_AD_CREDITS_EMPTY.REQ": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.CHA_AD_CREDITS_EMPTY.SNP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)", "Desc": "CBox AD Credits Empty", "EvSel": 34, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.CLOCKTICKS": { "Box": "M3UPI", "Category": "M3UPI UCLK Events", "Counters": "0-3", "Defn": "Counts the number of uclks in the M3 uclk domain. 
This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the M3 is close to the Ubox, they generally should not diverge by more than a handful of cycles.", "Desc": "Number of uclks in domain", "EvSel": 1, "ExtSel": "", }, "M3UPI.CMS_CLOCKTICKS": { "Box": "M3UPI", "Category": "M3UPI Misc Events", "Desc": "CMS Clockticks", "EvSel": 192, "ExtSel": "", }, "M3UPI.D2C_SENT": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Count cases BL sends direct to core", "Desc": "D2C Sent", "EvSel": 43, "ExtSel": "", }, "M3UPI.D2U_SENT": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Cases where SMI3 sends D2U command", "Desc": "D2U Sent", "EvSel": 42, "ExtSel": "", "Notes": "NOT required anymore", }, "M3UPI.EGRESS_ORDERING": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 174, "ExtSel": "", }, "M3UPI.EGRESS_ORDERING.IV_SNOOPGO_DN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 174, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.EGRESS_ORDERING.IV_SNOOPGO_UP": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 174, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.FAST_ASSERTED": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. 
Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 165, "ExtSel": "", }, "M3UPI.FAST_ASSERTED.HORZ": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 165, "ExtSel": "", "Umask": "b00000010", }, "M3UPI.FAST_ASSERTED.VERT": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 165, "ExtSel": "", "Umask": "b00000001", }, "M3UPI.HORZ_RING_AD_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", }, "M3UPI.HORZ_RING_AD_IN_USE.LEFT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.HORZ_RING_AD_IN_USE.RIGHT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.HORZ_RING_AD_IN_USE.LEFT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.HORZ_RING_AD_IN_USE.RIGHT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.HORZ_RING_AK_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", }, "M3UPI.HORZ_RING_AK_IN_USE.RIGHT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.HORZ_RING_AK_IN_USE.LEFT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.HORZ_RING_AK_IN_USE.LEFT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.HORZ_RING_AK_IN_USE.RIGHT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.HORZ_RING_BL_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", }, "M3UPI.HORZ_RING_BL_IN_USE.LEFT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.HORZ_RING_BL_IN_USE.RIGHT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.HORZ_RING_BL_IN_USE.LEFT_ODD": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.HORZ_RING_BL_IN_USE.RIGHT_EVEN": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.HORZ_RING_IV_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 173, "ExtSel": "", }, "M3UPI.HORZ_RING_IV_IN_USE.RIGHT": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.HORZ_RING_IV_IN_USE.LEFT": { "Box": "M3UPI", "Category": "M3UPI Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. 
To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.M2_BL_CREDITS_EMPTY": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", }, "M3UPI.M2_BL_CREDITS_EMPTY.NCS_SEL": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.M2_BL_CREDITS_EMPTY.IIO5_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.M2_BL_CREDITS_EMPTY.IIO2_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.M2_BL_CREDITS_EMPTY.IIO0_IIO1_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.M2_BL_CREDITS_EMPTY.NCS": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.M2_BL_CREDITS_EMPTY.IIO3_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.M2_BL_CREDITS_EMPTY.IIO4_NCB": { "Box": "M3UPI", 
"Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No vn0 and vna credits available to send to M2", "Desc": "M2 BL Credits Empty", "EvSel": 35, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.MULTI_SLOT_RCVD": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", }, "M3UPI.MULTI_SLOT_RCVD.AD_SLOT0": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.MULTI_SLOT_RCVD.AD_SLOT2": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.MULTI_SLOT_RCVD.AD_SLOT1": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.MULTI_SLOT_RCVD.BL_SLOT0": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.MULTI_SLOT_RCVD.AK_SLOT2": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, 
"ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.MULTI_SLOT_RCVD.AK_SLOT0": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)", "Desc": "Multi Slot Flit Received", "EvSel": 62, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RING_BOUNCES_HORZ": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", }, "M3UPI.RING_BOUNCES_HORZ.BL": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RING_BOUNCES_HORZ.AD": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RING_BOUNCES_HORZ.AK": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RING_BOUNCES_HORZ.IV": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RING_BOUNCES_VERT": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the 
Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", }, "M3UPI.RING_BOUNCES_VERT.IV": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RING_BOUNCES_VERT.BL": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RING_BOUNCES_VERT.AD": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RING_BOUNCES_VERT.AK": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RING_SINK_STARVED_HORZ": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", }, "M3UPI.RING_SINK_STARVED_HORZ.IV": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RING_SINK_STARVED_HORZ.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RING_SINK_STARVED_HORZ.BL": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", 
"Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RING_SINK_STARVED_HORZ.AD": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RING_SINK_STARVED_HORZ.AK": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RING_SINK_STARVED_VERT": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", }, "M3UPI.RING_SINK_STARVED_VERT.IV": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RING_SINK_STARVED_VERT.AK": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RING_SINK_STARVED_VERT.BL": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RING_SINK_STARVED_VERT.AD": { "Box": "M3UPI", "Category": "M3UPI Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RING_SRC_THRTL": { "Box": "M3UPI", "Category": "M3UPI Horizontal RING Events", "Desc": "Source Throttle", "EvSel": 164, "ExtSel": "", }, "M3UPI.RxC_ARB_LOST_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", }, "M3UPI.RxC_ARB_LOST_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": 
"bxx1xxxxx", }, "M3UPI.RxC_ARB_LOST_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_LOST_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_LOST_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_LOST_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_LOST_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_LOST_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message requested but lost arbitration", "Desc": "Lost Arb for VN0", "EvSel": 75, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_LOST_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", }, "M3UPI.RxC_ARB_LOST_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_LOST_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost 
arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_LOST_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_LOST_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_LOST_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_LOST_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_LOST_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message requested but lost arbitration", "Desc": "Lost Arb for VN1", "EvSel": 76, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_MISC": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", }, "M3UPI.RxC_ARB_MISC.NO_PROG_AD_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_MISC.NO_PROG_AD_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_MISC.NO_PROG_BL_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_MISC.NO_PROG_BL_VN1": { 
"Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_MISC.PAR_BIAS_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_MISC.PAR_BIAS_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_MISC.ADBL_PARALLEL_WIN": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Desc": "Arb Miscellaneous", "EvSel": 77, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_NOAD_REQ_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", }, "M3UPI.RxC_ARB_NOAD_REQ_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_NOAD_REQ_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_NOAD_REQ_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_NOAD_REQ_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won 
arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_NOAD_REQ_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_NOAD_REQ_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_NOAD_REQ_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN0", "EvSel": 73, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_NOAD_REQ_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", }, "M3UPI.RxC_ARB_NOAD_REQ_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_NOAD_REQ_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_NOAD_REQ_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for 
VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_NOAD_REQ_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_NOAD_REQ_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_NOAD_REQ_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_NOAD_REQ_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message was not able to request arbitration while some other message won arbitration", "Desc": "Can't Arb for VN1", "EvSel": 74, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_NOCRED_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", }, "M3UPI.RxC_ARB_NOCRED_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_ARB_NOCRED_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", 
"Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_NOCRED_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_NOCRED_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_NOCRED_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_NOCRED_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_ARB_NOCRED_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN0", "EvSel": 71, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_NOCRED_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", }, "M3UPI.RxC_ARB_NOCRED_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxxx1xxxx", }, 
"M3UPI.RxC_ARB_NOCRED_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_ARB_NOCRED_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_ARB_NOCRED_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_ARB_NOCRED_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_ARB_NOCRED_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_ARB_NOCRED_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Arbitration Events", "Defn": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits", "Desc": "No Credits to Arb for VN1", "EvSel": 72, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_BYPASSED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Counters": "0-2", "Defn": "Number ot times message is bypassed around the Ingress Queue", "Desc": "Ingress Queue Bypasses", "EvSel": 64, "ExtSel": "", }, "M3UPI.RxC_BYPASSED.AD_S1_BL_SLOT": { "Box": 
"M3UPI", "Category": "M3UPI INGRESS Events", "Counters": "0-2", "Defn": "Number ot times message is bypassed around the Ingress Queue", "Desc": "Ingress Queue Bypasses", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_BYPASSED.AD_S2_BL_SLOT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Counters": "0-2", "Defn": "Number ot times message is bypassed around the Ingress Queue", "Desc": "Ingress Queue Bypasses", "EvSel": 64, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_BYPASSED.AD_S0_BL_ARB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Counters": "0-2", "Defn": "Number ot times message is bypassed around the Ingress Queue", "Desc": "Ingress Queue Bypasses", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_BYPASSED.AD_S0_IDLE": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Counters": "0-2", "Defn": "Number ot times message is bypassed around the Ingress Queue", "Desc": "Ingress Queue Bypasses", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_COLLISION_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.", "Desc": "VN0 message lost contest for flit", "EvSel": 80, "ExtSel": "", }, "M3UPI.RxC_COLLISION_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.", "Desc": "VN0 message lost contest for flit", "EvSel": 80, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_COLLISION_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.", "Desc": "VN0 message lost contest for flit", "EvSel": 80, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_COLLISION_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": 
"Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.", "Desc": "VN0 message lost contest for flit", "EvSel": 80, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_COLLISION_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.", "Desc": "VN0 message lost contest for flit", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_COLLISION_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.", "Desc": "VN0 message lost contest for flit", "EvSel": 80, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_COLLISION_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.", "Desc": "VN0 message lost contest for flit", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_COLLISION_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.", "Desc": "VN0 message lost contest for flit", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_COLLISION_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.", "Desc": "VN1 message lost contest for flit", "EvSel": 81, "ExtSel": "", }, "M3UPI.RxC_COLLISION_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.", "Desc": "VN1 message lost contest for flit", "EvSel": 81, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_COLLISION_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting 
Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.", "Desc": "VN1 message lost contest for flit", "EvSel": 81, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_COLLISION_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.", "Desc": "VN1 message lost contest for flit", "EvSel": 81, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_COLLISION_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.", "Desc": "VN1 message lost contest for flit", "EvSel": 81, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_COLLISION_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.", "Desc": "VN1 message lost contest for flit", "EvSel": 81, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_COLLISION_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.", "Desc": "VN1 message lost contest for flit", "EvSel": 81, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_COLLISION_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.", "Desc": "VN1 message lost contest for flit", "EvSel": 81, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_CRD_MISC": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 96, "ExtSel": "", }, "M3UPI.RxC_CRD_MISC.ANY_BGF_FIFO": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 96, "ExtSel": "", 
"Umask": "bxxxxxxx1", }, "M3UPI.RxC_CRD_MISC.NO_D2K_FOR_ARB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_CRD_MISC.ANY_BGF_PATH": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Miscellaneous Credit Events", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_CRD_OCC": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 97, "ExtSel": "", }, "M3UPI.RxC_CRD_OCC.P1P_TOTAL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 97, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_CRD_OCC.TxQ_CRD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 97, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_CRD_OCC.P1P_FIFO": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 97, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_CRD_OCC.VNA_IN_USE": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_CRD_OCC.D2K_CRD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 97, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_CRD_OCC.FLITS_IN_FIFO": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_CRD_OCC.FLITS_IN_PATH": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Credit Occupancy", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_CYCLES_NE_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. 
This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", }, "M3UPI.RxC_CYCLES_NE_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_CYCLES_NE_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_CYCLES_NE_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_CYCLES_NE_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_CYCLES_NE_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_CYCLES_NE_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_CYCLES_NE_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 67, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_CYCLES_NE_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", }, "M3UPI.RxC_CYCLES_NE_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_CYCLES_NE_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. 
This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_CYCLES_NE_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_CYCLES_NE_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_CYCLES_NE_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_CYCLES_NE_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_CYCLES_NE_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Cycles Not Empty", "EvSel": 68, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_FLITS_DATA_NOT_SENT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Data flit is ready for transmission but could not be sent", "Desc": "Data Flit Not Sent", "EvSel": 87, "ExtSel": "", }, "M3UPI.RxC_FLITS_DATA_NOT_SENT.NO_BGF": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Data flit is ready for transmission but could not be sent", "Desc": "Data Flit Not Sent", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLITS_DATA_NOT_SENT.NO_TXQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Data flit is ready for transmission but could not be sent", "Desc": "Data Flit Not Sent", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLITS_DATA_NOT_SENT.ALL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Data flit is ready for transmission but could not be sent", "Desc": "Data Flit Not Sent", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLITS_GEN_BL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 89, "ExtSel": "", }, "M3UPI.RxC_FLITS_GEN_BL.P1P_BUSY": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 89, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_FLITS_GEN_BL.P1_WAIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 89, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLITS_GEN_BL.P1P_HOLD_P0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 89, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_FLITS_GEN_BL.P0_WAIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": 
"Generating BL Data Flit Sequence", "EvSel": 89, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLITS_GEN_BL.P1P_AT_LIMIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 89, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_FLITS_GEN_BL.P1P_FIFO_FULL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 89, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_FLITS_GEN_BL.P1P_TO_LIMBO": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Generating BL Data Flit Sequence", "EvSel": 89, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLITS_MISC": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "EvSel": 90, "ExtSel": "", }, "M3UPI.RxC_FLITS_SENT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 86, "ExtSel": "", }, "M3UPI.RxC_FLITS_SENT.3_MSGS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLITS_SENT.2_MSGS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLITS_SENT.1_MSG": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLITS_SENT.SLOTS_2": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_FLITS_SENT.1_MSG_VNX": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_FLITS_SENT.SLOTS_1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_FLITS_SENT.SLOTS_3": { "Box": "M3UPI", 
"Category": "M3UPI INGRESS Flit Events", "Desc": "Sent Header Flit", "EvSel": 86, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_FLITS_SLOT_BL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 88, "ExtSel": "", }, "M3UPI.RxC_FLITS_SLOT_BL.P0_WAIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 88, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_FLITS_SLOT_BL.P1_NOT_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 88, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 88, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_FLITS_SLOT_BL.P1_WAIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 88, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_FLITS_SLOT_BL.ALL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLITS_SLOT_BL.NEED_DATA": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Desc": "Slotting BL Message Into Header Flit", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLIT_GEN_HDR1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 83, "ExtSel": "", }, "M3UPI.RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events 
related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 83, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_FLIT_GEN_HDR1.PAR_MSG": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 83, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_FLIT_GEN_HDR1.PAR_FLIT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 83, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.RxC_FLIT_GEN_HDR1.AHEAD_MSG": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 83, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_FLIT_GEN_HDR1.PAR": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 83, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_FLIT_GEN_HDR1.ACCUM_WASTED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLIT_GEN_HDR1.ACCUM_READ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLIT_GEN_HDR1.ACCUM": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 1", "Desc": "Flit Gen - Header 1", "EvSel": 83, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLIT_GEN_HDR2": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 2", "Desc": "Flit Gen - Header 2", "EvSel": 84, "ExtSel": "", }, 
"M3UPI.RxC_FLIT_GEN_HDR2.RMSTALL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 2", "Desc": "Flit Gen - Header 2", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "Events related to Header Flit Generation - Set 2", "Desc": "Flit Gen - Header 2", "EvSel": 84, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLIT_NOT_SENT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 85, "ExtSel": "", }, "M3UPI.RxC_FLIT_NOT_SENT.NO_TXQ_CRD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_FLIT_NOT_SENT.THREE_TAKEN": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.RxC_FLIT_NOT_SENT.TWO_TAKEN": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_FLIT_NOT_SENT.ONE_TAKEN": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_FLIT_NOT_SENT.ALL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_FLIT_NOT_SENT.NO_TXQ_NO_MSG": { "Box": "M3UPI", "Category": "M3UPI 
INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_FLIT_NOT_SENT.NO_BGF_CRD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_FLIT_NOT_SENT.NO_BGF_NO_MSG": { "Box": "M3UPI", "Category": "M3UPI INGRESS Flit Events", "Defn": "header flit is ready for transmission but could not be sent", "Desc": "Header Not Sent", "EvSel": 85, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_HELD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 82, "ExtSel": "", }, "M3UPI.RxC_HELD.VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 82, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_HELD.CANT_SLOT_BL": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 82, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.RxC_HELD.PARALLEL_AD_LOST": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 82, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_HELD.PARALLEL_ATTEMPT": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 82, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_HELD.CANT_SLOT_AD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 82, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_HELD.PARALLEL_SUCCESS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 82, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_HELD.PARALLEL_BL_LOST": { "Box": 
"M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 82, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_HELD.VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Desc": "Message Held", "EvSel": 82, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_INSERTS_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", }, "M3UPI.RxC_INSERTS_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_INSERTS_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_INSERTS_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_INSERTS_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_INSERTS_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_INSERTS_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. 
This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_INSERTS_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN0 Ingress (from CMS) Queue - Inserts", "EvSel": 65, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_INSERTS_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", }, "M3UPI.RxC_INSERTS_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. 
Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_INSERTS_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_INSERTS_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_INSERTS_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_INSERTS_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. 
This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_INSERTS_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_INSERTS_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.", "Desc": "VN1 Ingress (from CMS) Queue - Inserts", "EvSel": 66, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_OCCUPANCY_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. 
This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", }, "M3UPI.RxC_OCCUPANCY_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_OCCUPANCY_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_OCCUPANCY_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_OCCUPANCY_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. 
This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_OCCUPANCY_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_OCCUPANCY_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_OCCUPANCY_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN0 Ingress (from CMS) Queue - Occupancy", "EvSel": 69, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_OCCUPANCY_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. 
This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", }, "M3UPI.RxC_OCCUPANCY_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_OCCUPANCY_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_OCCUPANCY_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_OCCUPANCY_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. 
This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_OCCUPANCY_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_OCCUPANCY_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_OCCUPANCY_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Events", "Defn": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. 
This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.", "Desc": "VN1 Ingress (from CMS) Queue - Occupancy", "EvSel": 70, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_PACKING_MISS_VN0": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", }, "M3UPI.RxC_PACKING_MISS_VN0.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_PACKING_MISS_VN0.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_PACKING_MISS_VN0.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_PACKING_MISS_VN0.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack 
into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_PACKING_MISS_VN0.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_PACKING_MISS_VN0.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_PACKING_MISS_VN0.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN0 message can't slot into flit", "EvSel": 78, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_PACKING_MISS_VN1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", }, "M3UPI.RxC_PACKING_MISS_VN1.BL_WB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 
message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_PACKING_MISS_VN1.BL_NCS": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.RxC_PACKING_MISS_VN1.BL_NCB": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_PACKING_MISS_VN1.AD_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_PACKING_MISS_VN1.AD_REQ": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_PACKING_MISS_VN1.AD_SNP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": 
"bxxxxxx1x", }, "M3UPI.RxC_PACKING_MISS_VN1.BL_RSP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Sloting Events", "Counters": "0-2", "Defn": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.", "Desc": "VN1 message can't slot into flit", "EvSel": 79, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_SMI3_PFTCH": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "SMI3 Prefetch Messages", "EvSel": 98, "ExtSel": "", }, "M3UPI.RxC_SMI3_PFTCH.ARB_LOST": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "SMI3 Prefetch Messages", "EvSel": 98, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_SMI3_PFTCH.DROP_OLD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "SMI3 Prefetch Messages", "EvSel": 98, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_SMI3_PFTCH.SLOTTED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "SMI3 Prefetch Messages", "EvSel": 98, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxC_SMI3_PFTCH.DROP_WRAP": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "SMI3 Prefetch Messages", "EvSel": 98, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_SMI3_PFTCH.ARRIVED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "SMI3 Prefetch Messages", "EvSel": 98, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_VNA_CRD": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 91, "ExtSel": "", }, "M3UPI.RxC_VNA_CRD.LT4": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 91, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.RxC_VNA_CRD.USED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 91, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.RxC_VNA_CRD.LT5": { "Box": "M3UPI", "Category": "M3UPI INGRESS 
Credit Events", "Desc": "Remote VNA Credits", "EvSel": 91, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.RxC_VNA_CRD.ANY_IN_USE": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 91, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.RxC_VNA_CRD.CORRECTED": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 91, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.RxC_VNA_CRD.LT1": { "Box": "M3UPI", "Category": "M3UPI INGRESS Credit Events", "Desc": "Remote VNA Credits", "EvSel": 91, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.RxR_BUSY_STARVED": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", }, "M3UPI.RxR_BUSY_STARVED.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "M3UPI.RxR_BUSY_STARVED.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. 
This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "M3UPI.RxR_BUSY_STARVED.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "M3UPI.RxR_BUSY_STARVED.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "M3UPI.RxR_BYPASS": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", }, "M3UPI.RxR_BYPASS.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "M3UPI.RxR_BYPASS.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "M3UPI.RxR_BYPASS.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "M3UPI.RxR_BYPASS.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "M3UPI.RxR_BYPASS.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "M3UPI.RxR_BYPASS.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "M3UPI.RxR_CRD_STARVED": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", }, "M3UPI.RxR_CRD_STARVED.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "M3UPI.RxR_CRD_STARVED.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. 
This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "M3UPI.RxR_CRD_STARVED.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "M3UPI.RxR_CRD_STARVED.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "M3UPI.RxR_CRD_STARVED.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. 
This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "M3UPI.RxR_CRD_STARVED.IFV": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b1xxxxxxx", }, "M3UPI.RxR_CRD_STARVED.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. 
Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "M3UPI.RxR_INSERTS": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "M3UPI.RxR_INSERTS.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "M3UPI.RxR_INSERTS.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "M3UPI.RxR_INSERTS.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "M3UPI.RxR_INSERTS.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "M3UPI.RxR_INSERTS.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "M3UPI.RxR_INSERTS.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "M3UPI.RxR_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", }, "M3UPI.RxR_OCCUPANCY.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "M3UPI.RxR_OCCUPANCY.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "M3UPI.RxR_OCCUPANCY.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "M3UPI.RxR_OCCUPANCY.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "M3UPI.RxR_OCCUPANCY.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "M3UPI.RxR_OCCUPANCY.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", 
"Umask": "bxxxxx1xx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 
Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR 
credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the 
BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4": { "Box": "M3UPI", "Category": "M3UPI CMS 
Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0": { "Box": "M3UPI", "Category": "M3UPI CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_ARB_FAIL": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", }, "M3UPI.TxC_AD_ARB_FAIL.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_AD_ARB_FAIL.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_ARB_FAIL.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_ARB_FAIL.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_ARB_FAIL.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_ARB_FAIL.VN0_SNP": { "Box": "M3UPI", 
"Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_ARB_FAIL.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_ARB_FAIL.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD arb but no win; arb request asserted but not won", "Desc": "Failed ARB for AD", "EvSel": 48, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_AD_FLQ_BYPASS": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)", "Desc": "AD FlowQ Bypass", "EvSel": 44, "ExtSel": "", }, "M3UPI.TxC_AD_FLQ_BYPASS.AD_SLOT2": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)", "Desc": "AD FlowQ Bypass", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_FLQ_BYPASS.AD_SLOT1": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)", "Desc": "AD FlowQ Bypass", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_FLQ_BYPASS.AD_SLOT0": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)", "Desc": "AD FlowQ Bypass", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxxxx1", 
}, "M3UPI.TxC_AD_FLQ_BYPASS.BL_EARLY_RSP": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)", "Desc": "AD FlowQ Bypass", "EvSel": 44, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_AD_FLQ_CYCLES_NE.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the AD Egress queue is Not Empty", "Desc": "AD Flow Q Not Empty", "EvSel": 39, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_FLQ_INSERTS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_FLQ_INSERTS.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. 
This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "AD Flow Q Inserts", "EvSel": 45, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_FLQ_OCCUPANCY.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AD Flow Q Occupancy", "EvSel": 28, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_SNPF_GRP1_VN1": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Counters": 0, "Defn": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout 
latency", "Desc": "Number of Snoop Targets", "EvSel": 60, "ExtSel": "", }, "M3UPI.TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI0": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Counters": 0, "Defn": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency", "Desc": "Number of Snoop Targets", "EvSel": 60, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI0": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Counters": 0, "Defn": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency", "Desc": "Number of Snoop Targets", "EvSel": 60, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_SNPF_GRP1_VN1.VN1_NON_IDLE": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Counters": 0, "Defn": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency", "Desc": "Number of Snoop Targets", "EvSel": 60, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_AD_SNPF_GRP1_VN1.VN0_CHA": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Counters": 0, "Defn": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency", "Desc": "Number of Snoop Targets", "EvSel": 60, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_SNPF_GRP1_VN1.VN0_NON_IDLE": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Counters": 0, "Defn": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency", "Desc": "Number of Snoop Targets", "EvSel": 60, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI1": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Counters": 0, "Defn": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency", "Desc": "Number of Snoop Targets", "EvSel": 60, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI1": { "Box": "M3UPI", "Category": "M3UPI ARB Events", 
"Counters": 0, "Defn": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency", "Desc": "Number of Snoop Targets", "EvSel": 60, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_SNPF_GRP1_VN1.VN1_CHA": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Counters": 0, "Defn": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency", "Desc": "Number of Snoop Targets", "EvSel": 60, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_SNPF_GRP2_VN1": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "Outcome of SnpF pending arbitration", "Desc": "Snoop Arbitration", "EvSel": 61, "ExtSel": "", }, "M3UPI.TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_VN0SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "Outcome of SnpF pending arbitration", "Desc": "Snoop Arbitration", "EvSel": 61, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_VN2SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "Outcome of SnpF pending arbitration", "Desc": "Snoop Arbitration", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_NONSNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "Outcome of SnpF pending arbitration", "Desc": "Snoop Arbitration", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_NONSNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "Outcome of SnpF pending arbitration", "Desc": "Snoop Arbitration", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_SPEC_ARB_CRD_AVAIL": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request with prior cycle credit check complete and credit avail", "Desc": "Speculative ARB for AD - Credit Available", "EvSel": 52, "ExtSel": "", }, "M3UPI.TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request with prior 
cycle credit check complete and credit avail", "Desc": "Speculative ARB for AD - Credit Available", "EvSel": 52, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request with prior cycle credit check complete and credit avail", "Desc": "Speculative ARB for AD - Credit Available", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request with prior cycle credit check complete and credit avail", "Desc": "Speculative ARB for AD - Credit Available", "EvSel": 52, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request with prior cycle credit check complete and credit avail", "Desc": "Speculative ARB for AD - Credit Available", "EvSel": 52, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request with prior cycle credit check complete and credit avail", "Desc": "Speculative ARB for AD - Credit Available", "EvSel": 52, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request with prior cycle credit check complete and credit avail", "Desc": "Speculative ARB for AD - Credit Available", "EvSel": 52, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_SPEC_ARB_NEW_MSG": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for AD - New Message", "EvSel": 51, "ExtSel": "", }, "M3UPI.TxC_AD_SPEC_ARB_NEW_MSG.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request due 
to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for AD - New Message", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_SPEC_ARB_NEW_MSG.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for AD - New Message", "EvSel": 51, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_AD_SPEC_ARB_NEW_MSG.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for AD - New Message", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_SPEC_ARB_NEW_MSG.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for AD - New Message", "EvSel": 51, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_SPEC_ARB_NEW_MSG.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for AD - New Message", "EvSel": 51, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_SPEC_ARB_NEW_MSG.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for AD - New Message", "EvSel": 51, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_SPEC_ARB_NO_OTHER_PEND": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD - No Credit", "EvSel": 50, "ExtSel": "", }, "M3UPI.TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD 
speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD - No Credit", "EvSel": 50, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD - No Credit", "EvSel": 50, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD - No Credit", "EvSel": 50, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD - No Credit", "EvSel": 50, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD - No Credit", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD - No Credit", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative 
arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD - No Credit", "EvSel": 50, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD - No Credit", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_AK_FLQ_INSERTS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Desc": "AK Flow Q Inserts", "EvSel": 47, "ExtSel": "", }, "M3UPI.TxC_AK_FLQ_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "AK Flow Q Occupancy", "EvSel": 30, "ExtSel": "", }, "M3UPI.TxC_BL_ARB_FAIL": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", }, "M3UPI.TxC_BL_ARB_FAIL.VN0_NCS": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_BL_ARB_FAIL.VN0_NCB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_BL_ARB_FAIL.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_BL_ARB_FAIL.VN1_NCB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_BL_ARB_FAIL.VN1_WB": { 
"Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_BL_ARB_FAIL.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_BL_ARB_FAIL.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_BL_ARB_FAIL.VN1_NCS": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL arb but no win; arb request asserted but not won", "Desc": "Failed ARB for BL", "EvSel": 53, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. 
Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_BL_FLQ_CYCLES_NE.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Number of cycles the BL Egress queue is Not Empty", "Desc": "BL Flow Q Not Empty", "EvSel": 40, "ExtSel": "", "Notes": "Counts the number of cycles when the QPI FlowQ is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue occupancy. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_BL_FLQ_INSERTS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN1_NCS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. 
Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN1_NCB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN0_NCB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. 
Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_BL_FLQ_INSERTS.VN0_NCS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. 
It is not possible to filter based on direction or polarity.", "Desc": "BL Flow Q Inserts", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN1_NCS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN0_NCS": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN0_NCB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_BL_FLQ_OCCUPANCY.VN1_NCB": { "Box": "M3UPI", "Category": "M3UPI FlowQ Events", "Counters": 0, "Desc": "BL Flow Q Occupancy", "EvSel": 29, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_BL_SPEC_ARB_NEW_MSG": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": 
"Speculative ARB for BL - New Message", "EvSel": 56, "ExtSel": "", }, "M3UPI.TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCS": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for BL - New Message", "EvSel": 56, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_BL_SPEC_ARB_NEW_MSG.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for BL - New Message", "EvSel": 56, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for BL - New Message", "EvSel": 56, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for BL - New Message", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxC_BL_SPEC_ARB_NEW_MSG.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for BL - New Message", "EvSel": 56, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCS": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)", "Desc": "Speculative ARB for BL - New Message", "EvSel": 56, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_BL_SPEC_ARB_NO_OTHER_PEND": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have 
credits to send)", "Desc": "Speculative ARB for AD Failed - No Credit", "EvSel": 55, "ExtSel": "", }, "M3UPI.TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD Failed - No Credit", "EvSel": 55, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD Failed - No Credit", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCS": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD Failed - No Credit", "EvSel": 55, "ExtSel": "", "Umask": "b1xxxxxxx", }, "M3UPI.TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD Failed - No Credit", "EvSel": 55, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD Failed - No Credit", "EvSel": 55, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCS": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't 
have credits to send)", "Desc": "Speculative ARB for AD Failed - No Credit", "EvSel": 55, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD Failed - No Credit", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI ARB Events", "Defn": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)", "Desc": "Speculative ARB for AD Failed - No Credit", "EvSel": 55, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_HORZ_ADS_USED": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", }, "M3UPI.TxR_HORZ_ADS_USED.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_HORZ_ADS_USED.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_HORZ_ADS_USED.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxx1x", }, 
"M3UPI.TxR_HORZ_ADS_USED.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_HORZ_ADS_USED.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_HORZ_BYPASS": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", }, "M3UPI.TxR_HORZ_BYPASS.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_HORZ_BYPASS.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_HORZ_BYPASS.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_HORZ_BYPASS.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, 
"ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_HORZ_BYPASS.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_HORZ_BYPASS.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_HORZ_CYCLES_FULL": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", }, "M3UPI.TxR_HORZ_CYCLES_FULL.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_HORZ_CYCLES_FULL.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_HORZ_CYCLES_FULL.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_HORZ_CYCLES_FULL.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_HORZ_CYCLES_FULL.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_HORZ_CYCLES_FULL.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_HORZ_CYCLES_NE": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", }, "M3UPI.TxR_HORZ_CYCLES_NE.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_HORZ_CYCLES_NE.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_HORZ_CYCLES_NE.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_HORZ_CYCLES_NE.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_HORZ_CYCLES_NE.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_HORZ_CYCLES_NE.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_HORZ_INSERTS": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", }, "M3UPI.TxR_HORZ_INSERTS.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_HORZ_INSERTS.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_HORZ_INSERTS.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_HORZ_INSERTS.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bx1xxxxxx", 
}, "M3UPI.TxR_HORZ_INSERTS.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_HORZ_INSERTS.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_HORZ_NACK": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", }, "M3UPI.TxR_HORZ_NACK.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_HORZ_NACK.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_HORZ_NACK.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_HORZ_NACK.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 
153, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_HORZ_NACK.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_HORZ_NACK.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_HORZ_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", }, "M3UPI.TxR_HORZ_OCCUPANCY.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_HORZ_OCCUPANCY.AD_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_HORZ_OCCUPANCY.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxx1xxx", 
}, "M3UPI.TxR_HORZ_OCCUPANCY.BL_CRD": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_HORZ_OCCUPANCY.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_HORZ_OCCUPANCY.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_HORZ_STARVED": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", }, "M3UPI.TxR_HORZ_STARVED.AD_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_HORZ_STARVED.AK_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_HORZ_STARVED.IV_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_HORZ_STARVED.BL_BNC": { "Box": "M3UPI", "Category": "M3UPI CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_ADS_USED": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", }, "M3UPI.TxR_VERT_ADS_USED.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_ADS_USED.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_ADS_USED.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets 
using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_ADS_USED.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_ADS_USED.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_ADS_USED.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_BYPASS": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", }, "M3UPI.TxR_VERT_BYPASS.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_BYPASS.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_BYPASS.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets 
bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_BYPASS.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_BYPASS.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_BYPASS.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_BYPASS.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_CYCLES_FULL": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", }, "M3UPI.TxR_VERT_CYCLES_FULL.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_CYCLES_FULL.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_CYCLES_FULL.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_CYCLES_FULL.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_CYCLES_FULL.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_CYCLES_FULL.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_CYCLES_FULL.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_CYCLES_NE": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", }, "M3UPI.TxR_VERT_CYCLES_NE.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_CYCLES_NE.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_CYCLES_NE.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_CYCLES_NE.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_CYCLES_NE.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_CYCLES_NE.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_CYCLES_NE.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_INSERTS": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", }, "M3UPI.TxR_VERT_INSERTS.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_INSERTS.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_INSERTS.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_INSERTS.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_INSERTS.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_INSERTS.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_INSERTS.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_NACK": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", }, "M3UPI.TxR_VERT_NACK.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_NACK.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_NACK.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_NACK.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of 
Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_NACK.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_NACK.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_NACK.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_OCCUPANCY": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", }, "M3UPI.TxR_VERT_OCCUPANCY.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_OCCUPANCY.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_OCCUPANCY.AK_AG0": { "Box": "M3UPI", 
"Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_OCCUPANCY.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_OCCUPANCY.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_OCCUPANCY.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_OCCUPANCY.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.TxR_VERT_STARVED": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", }, "M3UPI.TxR_VERT_STARVED.IV": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.TxR_VERT_STARVED.AK_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.TxR_VERT_STARVED.AD_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.TxR_VERT_STARVED.AK_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.TxR_VERT_STARVED.BL_AG1": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M3UPI.TxR_VERT_STARVED.AD_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.TxR_VERT_STARVED.BL_AG0": { "Box": "M3UPI", "Category": "M3UPI CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxxxxxx1x", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxxxxx1xx", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP": { "Box": 
"M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxxxx1xxx", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxx1xxxxx", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bx1xxxxxx", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxxx1xxxx", }, "M3UPI.UPI_PEER_AD_CREDITS_EMPTY.VNA": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPIs on the AD Ring", "Desc": "UPI0 AD Credits Empty", "EvSel": 32, "ExtSel": "", "Notes": "2 cases for non-smi3 mode and 3 cases for smi3 mode", "Umask": "bxxxxxxx1", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB": { "Box": "M3UPI", "Category": 
"M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxx1xxxxx", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN0_WB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxxxx1xxx", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxxxxx1xx", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VNA": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxxxxxxx1", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN1_WB": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bx1xxxxxx", }, "M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxxxxxx1x", }, 
"M3UPI.UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP": { "Box": "M3UPI", "Category": "M3UPI EGRESS Credit Events", "Counters": "0-3", "Defn": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)", "Desc": "UPI0 BL Credits Empty", "EvSel": 33, "ExtSel": "", "Notes": "smi and non-smi modes", "Umask": "bxxx1xxxx", }, "M3UPI.UPI_PREFETCH_SPAWN": { "Box": "M3UPI", "Category": "M3UPI Special Egress Events", "Counters": "0-3", "Defn": "Count cases where FlowQ causes spawn of Prefetch to iMC/SMI3 target", "Desc": "FlowQ Generated Prefetch", "EvSel": 41, "ExtSel": "", }, "M3UPI.VERT_RING_AD_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", }, "M3UPI.VERT_RING_AD_IN_USE.DN_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VERT_RING_AD_IN_USE.UP_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VERT_RING_AD_IN_USE.UP_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. 
On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VERT_RING_AD_IN_USE.DN_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VERT_RING_AK_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", }, "M3UPI.VERT_RING_AK_IN_USE.UP_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VERT_RING_AK_IN_USE.DN_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VERT_RING_AK_IN_USE.UP_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VERT_RING_AK_IN_USE.DN_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VERT_RING_BL_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", }, "M3UPI.VERT_RING_BL_IN_USE.DN_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VERT_RING_BL_IN_USE.UP_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VERT_RING_BL_IN_USE.UP_ODD": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VERT_RING_BL_IN_USE.DN_EVEN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VERT_RING_IV_IN_USE": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 172, "ExtSel": "", }, "M3UPI.VERT_RING_IV_IN_USE.DN": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VERT_RING_IV_IN_USE.UP": { "Box": "M3UPI", "Category": "M3UPI Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VN0_CREDITS_USED": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 92, "ExtSel": "", }, "M3UPI.VN0_CREDITS_USED.SNP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VN0_CREDITS_USED.NCB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.VN0_CREDITS_USED.RSP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VN0_CREDITS_USED.REQ": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VN0_CREDITS_USED.WB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VN0_CREDITS_USED.NCS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. 
A transfer on VN0 will only count a single credit even though it may use multiple buffers.", "Desc": "VN0 Credit Used", "EvSel": 92, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.VN0_NO_CREDITS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 94, "ExtSel": "", }, "M3UPI.VN0_NO_CREDITS.NCS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.VN0_NO_CREDITS.WB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VN0_NO_CREDITS.REQ": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VN0_NO_CREDITS.RSP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VN0_NO_CREDITS.NCB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.VN0_NO_CREDITS.SNP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN0 Credits", "Desc": "VN0 No Credits", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VN1_CREDITS_USED": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. 
There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 93, "ExtSel": "", }, "M3UPI.VN1_CREDITS_USED.NCS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 93, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.VN1_CREDITS_USED.WB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. 
VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 93, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VN1_CREDITS_USED.REQ": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M3UPI.VN1_CREDITS_USED.RSP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. 
VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VN1_CREDITS_USED.NCB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 93, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.VN1_CREDITS_USED.SNP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. 
VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.", "Desc": "VN1 Credit Used", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VN1_NO_CREDITS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 95, "ExtSel": "", }, "M3UPI.VN1_NO_CREDITS.WB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 95, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M3UPI.VN1_NO_CREDITS.NCS": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 95, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M3UPI.VN1_NO_CREDITS.NCB": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 95, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M3UPI.VN1_NO_CREDITS.SNP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 95, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M3UPI.VN1_NO_CREDITS.RSP": { "Box": "M3UPI", "Category": "M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 95, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M3UPI.VN1_NO_CREDITS.REQ": { "Box": "M3UPI", "Category": 
"M3UPI Link VN Credit Events", "Defn": "Number of Cycles there were no VN1 Credits", "Desc": "VN1 No Credits", "EvSel": 95, "ExtSel": "", "Umask": "bxxxxxxx1", }, # M2M: "M2M.AG0_AD_CRD_ACQUIRED": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "M2M.AG0_AD_CRD_ACQUIRED.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "M2M.AG0_AD_CRD_ACQUIRED.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "M2M.AG0_AD_CRD_ACQUIRED.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "M2M.AG0_AD_CRD_ACQUIRED.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "M2M.AG0_AD_CRD_ACQUIRED.TGR5": { "Box": "M2M", "Category": "M2M CMS 
Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "M2M.AG0_AD_CRD_ACQUIRED.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "M2M.AG0_AD_CRD_OCCUPANCY": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "M2M.AG0_AD_CRD_OCCUPANCY.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxx1xxx", }, "M2M.AG0_AD_CRD_OCCUPANCY.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxx1xxxx", }, "M2M.AG0_AD_CRD_OCCUPANCY.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of 
all selected", "Umask": "bxxxxxxx1", }, "M2M.AG0_AD_CRD_OCCUPANCY.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxx1xxxxx", }, "M2M.AG0_AD_CRD_OCCUPANCY.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxx1xx", }, "M2M.AG0_AD_CRD_OCCUPANCY.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxx1x", }, "M2M.AG0_BL_CRD_ACQUIRED": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", }, "M2M.AG0_BL_CRD_ACQUIRED.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.AG0_BL_CRD_ACQUIRED.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.AG0_BL_CRD_ACQUIRED.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress 
Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.AG0_BL_CRD_ACQUIRED.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.AG0_BL_CRD_ACQUIRED.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.AG0_BL_CRD_ACQUIRED.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.AG0_BL_CRD_OCCUPANCY": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", }, "M2M.AG0_BL_CRD_OCCUPANCY.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.AG0_BL_CRD_OCCUPANCY.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.AG0_BL_CRD_OCCUPANCY.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, 
per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.AG0_BL_CRD_OCCUPANCY.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.AG0_BL_CRD_OCCUPANCY.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.AG0_BL_CRD_OCCUPANCY.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.AG1_AD_CRD_ACQUIRED": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "M2M.AG1_AD_CRD_ACQUIRED.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "M2M.AG1_AD_CRD_ACQUIRED.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, 
"M2M.AG1_AD_CRD_ACQUIRED.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "M2M.AG1_AD_CRD_ACQUIRED.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "M2M.AG1_AD_CRD_ACQUIRED.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "M2M.AG1_AD_CRD_ACQUIRED.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "M2M.AG1_AD_CRD_OCCUPANCY": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "M2M.AG1_AD_CRD_OCCUPANCY.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, 
"ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxx1xx", }, "M2M.AG1_AD_CRD_OCCUPANCY.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxx1xxxxx", }, "M2M.AG1_AD_CRD_OCCUPANCY.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxx1x", }, "M2M.AG1_AD_CRD_OCCUPANCY.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxx1xxx", }, "M2M.AG1_AD_CRD_OCCUPANCY.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxx1xxxx", }, "M2M.AG1_AD_CRD_OCCUPANCY.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxxx1", }, "M2M.AG1_BL_CRD_OCCUPANCY": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": 
"Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", }, "M2M.AG1_BL_CRD_OCCUPANCY.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.AG1_BL_CRD_OCCUPANCY.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.AG1_BL_CRD_OCCUPANCY.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.AG1_BL_CRD_OCCUPANCY.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.AG1_BL_CRD_OCCUPANCY.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.AG1_BL_CRD_OCCUPANCY.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.AG1_BL_CREDITS_ACQUIRED": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", 
"Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", }, "M2M.AG1_BL_CREDITS_ACQUIRED.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.AG1_BL_CREDITS_ACQUIRED.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.AG1_BL_CREDITS_ACQUIRED.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.AG1_BL_CREDITS_ACQUIRED.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.AG1_BL_CREDITS_ACQUIRED.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.AG1_BL_CREDITS_ACQUIRED.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.BYPASS_M2M_EGRESS": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "M2M to iMC Bypass", "EvSel": 34, "ExtSel": "", }, "M2M.BYPASS_M2M_EGRESS.TAKEN": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "M2M to 
iMC Bypass", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.BYPASS_M2M_EGRESS.NOT_TAKEN": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "M2M to iMC Bypass", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.BYPASS_M2M_INGRESS": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "M2M to iMC Bypass", "EvSel": 33, "ExtSel": "", }, "M2M.BYPASS_M2M_INGRESS.NOT_TAKEN": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "M2M to iMC Bypass", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.BYPASS_M2M_INGRESS.TAKEN": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "M2M to iMC Bypass", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.CLOCKTICKS": { "Box": "M2M", "Category": "M2M UCLK Events", "Desc": "Cycles - at UCLK", "EvSel": 0, "ExtSel": "", }, "M2M.CMS_CLOCKTICKS": { "Box": "M2M", "Category": "M2M Misc Events", "Desc": "CMS Clockticks", "EvSel": 192, "ExtSel": "", }, "M2M.DIRECT2CORE_NOT_TAKEN_DIRSTATE": { "Box": "M2M", "Category": "M2M DIRECT2CORE Events", "Desc": "Cycles when Direct2Core was Disabled", "EvSel": 36, "ExtSel": "", }, "M2M.DIRECT2CORE_TAKEN": { "Box": "M2M", "Category": "M2M DIRECT2CORE Events", "Desc": "Direct2Core Messages Sent", "EvSel": 35, "ExtSel": "", }, "M2M.DIRECT2CORE_TXN_OVERRIDE": { "Box": "M2M", "Category": "M2M DIRECT2CORE Events", "Desc": "Number of Reads that had Direct2Core Overridden", "EvSel": 37, "ExtSel": "", }, "M2M.DIRECT2UPI_NOT_TAKEN_CREDITS": { "Box": "M2M", "Category": "M2M DIRECT2UPI Events", "Desc": "Number of Reads that had Direct2UPI Overridden", "EvSel": 40, "ExtSel": "", }, "M2M.DIRECT2UPI_NOT_TAKEN_DIRSTATE": { "Box": "M2M", "Category": "M2M DIRECT2UPI Events", "Desc": "Cycles when Direct2UPI was Disabled", "EvSel": 39, "ExtSel": "", }, "M2M.DIRECT2UPI_TAKEN": { "Box": "M2M", "Category": "M2M DIRECT2UPI Events", "Desc": "Direct2UPI Messages Sent", "EvSel": 38, "ExtSel": "", }, "M2M.DIRECT2UPI_TXN_OVERRIDE": { "Box": "M2M", "Category": "M2M 
DIRECT2UPI Events", "Desc": "Number of Reads that had Direct2UPI Overridden", "EvSel": 41, "ExtSel": "", }, "M2M.DIRECTORY_HIT": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", }, "M2M.DIRECTORY_HIT.CLEAN_A": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "b1xxxxxxx", }, "M2M.DIRECTORY_HIT.DIRTY_I": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxxxx1", }, "M2M.DIRECTORY_HIT.DIRTY_S": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxxx1x", }, "M2M.DIRECTORY_HIT.CLEAN_P": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bx1xxxxxx", }, "M2M.DIRECTORY_HIT.CLEAN_S": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxx1xxxxx", }, "M2M.DIRECTORY_HIT.DIRTY_P": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxx1xx", }, "M2M.DIRECTORY_HIT.CLEAN_I": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxx1xxxx", }, "M2M.DIRECTORY_HIT.DIRTY_A": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Hit", "EvSel": 42, "ExtSel": "", "Notes": 
"Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxx1xxx", }, "M2M.DIRECTORY_LOOKUP": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Lookups", "EvSel": 45, "ExtSel": "", }, "M2M.DIRECTORY_LOOKUP.ANY": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Lookups", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.DIRECTORY_LOOKUP.STATE_A": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Lookups", "EvSel": 45, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.DIRECTORY_LOOKUP.STATE_S": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Lookups", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.DIRECTORY_LOOKUP.STATE_I": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Lookups", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.DIRECTORY_MISS": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", }, "M2M.DIRECTORY_MISS.DIRTY_I": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxxxx1", }, "M2M.DIRECTORY_MISS.CLEAN_A": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "b1xxxxxxx", }, "M2M.DIRECTORY_MISS.DIRTY_P": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxx1xx", }, "M2M.DIRECTORY_MISS.DIRTY_A": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxx1xxx", }, "M2M.DIRECTORY_MISS.CLEAN_S": { "Box": "M2M", 
"Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxx1xxxxx", }, "M2M.DIRECTORY_MISS.CLEAN_I": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxx1xxxx", }, "M2M.DIRECTORY_MISS.DIRTY_S": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bxxxxxx1x", }, "M2M.DIRECTORY_MISS.CLEAN_P": { "Box": "M2M", "Category": "M2M Directory State Events", "Desc": "Directory Miss", "EvSel": 43, "ExtSel": "", "Notes": "Covers NearMem Reads (Demand and Underfill).", "Umask": "bx1xxxxxx", }, "M2M.DIRECTORY_UPDATE": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Updates", "EvSel": 46, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", }, "M2M.DIRECTORY_UPDATE.A2S": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Updates", "EvSel": 46, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bx1xxxxxx", }, "M2M.DIRECTORY_UPDATE.S2I": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Updates", "EvSel": 46, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bxxxx1xxx", }, "M2M.DIRECTORY_UPDATE.ANY": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Updates", "EvSel": 46, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bxxxxxxx1", }, "M2M.DIRECTORY_UPDATE.A2I": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Updates", "EvSel": 46, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bxx1xxxxx", }, "M2M.DIRECTORY_UPDATE.S2A": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Updates", "EvSel": 46, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": 
"bxxx1xxxx", }, "M2M.DIRECTORY_UPDATE.I2A": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Updates", "EvSel": 46, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bxxxxx1xx", }, "M2M.DIRECTORY_UPDATE.I2S": { "Box": "M2M", "Category": "M2M DIRECTORY Events", "Desc": "Directory Updates", "EvSel": 46, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bxxxxxx1x", }, "M2M.EGRESS_ORDERING": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 174, "ExtSel": "", }, "M2M.EGRESS_ORDERING.IV_SNOOPGO_DN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 174, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.EGRESS_ORDERING.IV_SNOOPGO_UP": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 174, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.FAST_ASSERTED": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 165, "ExtSel": "", }, "M2M.FAST_ASSERTED.HORZ": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. 
Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 165, "ExtSel": "", "Umask": "b00000010", }, "M2M.FAST_ASSERTED.VERT": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 165, "ExtSel": "", "Umask": "b00000001", }, "M2M.HORZ_RING_AD_IN_USE": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", }, "M2M.HORZ_RING_AD_IN_USE.RIGHT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.HORZ_RING_AD_IN_USE.LEFT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.HORZ_RING_AD_IN_USE.RIGHT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.HORZ_RING_AD_IN_USE.LEFT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.HORZ_RING_AK_IN_USE": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", }, "M2M.HORZ_RING_AK_IN_USE.LEFT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.HORZ_RING_AK_IN_USE.RIGHT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.HORZ_RING_AK_IN_USE.LEFT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.HORZ_RING_AK_IN_USE.RIGHT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.HORZ_RING_BL_IN_USE": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", }, "M2M.HORZ_RING_BL_IN_USE.RIGHT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.HORZ_RING_BL_IN_USE.LEFT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.HORZ_RING_BL_IN_USE.RIGHT_EVEN": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.HORZ_RING_BL_IN_USE.LEFT_ODD": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.HORZ_RING_IV_IN_USE": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 173, "ExtSel": "", }, "M2M.HORZ_RING_IV_IN_USE.RIGHT": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.HORZ_RING_IV_IN_USE.LEFT": { "Box": "M2M", "Category": "M2M Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.IMC_READS": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", }, "M2M.IMC_READS.ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxxx1x", }, "M2M.IMC_READS.NORMAL": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxxxx1", }, "M2M.IMC_READS.FROM_TRANSGRESS": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxx1xxxx", }, "M2M.IMC_READS.ALL": { "Box": "M2M", "Category": "M2M IMC Events", 
"Desc": "M2M Reads Issued to iMC", "EvSel": 55, "ExtSel": "", "Notes": "Scrub Reads due to ECC errors not currently included", "Umask": "bxxxxx1xx", }, "M2M.IMC_WRITES": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", }, "M2M.IMC_WRITES.FULL": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxxx1", }, "M2M.IMC_WRITES.FULL_ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxx1xx", }, "M2M.IMC_WRITES.PARTIAL": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxxxx1x", }, "M2M.IMC_WRITES.PARTIAL_ISOCH": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxxx1xxx", }, "M2M.IMC_WRITES.ALL": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bxxx1xxxx", }, "M2M.IMC_WRITES.NI": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "b1xxxxxxx", }, "M2M.IMC_WRITES.FROM_TRANSGRESS": { "Box": "M2M", "Category": "M2M IMC Events", "Desc": "M2M Writes Issued to iMC", "EvSel": 56, "ExtSel": "", "Notes": "Scrub Writes due to ECC errors not currently included", "Umask": "bx1xxxxxx", }, "M2M.PKT_MATCH": { "Box": "M2M", "Category": "M2M PACKET MATCH 
Events", "Desc": "Number Packet Header Matches", "EvSel": 76, "ExtSel": "", }, "M2M.PKT_MATCH.MC": { "Box": "M2M", "Category": "M2M PACKET MATCH Events", "Desc": "Number Packet Header Matches", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.PKT_MATCH.MESH": { "Box": "M2M", "Category": "M2M PACKET MATCH Events", "Desc": "Number Packet Header Matches", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.PREFCAM_CYCLES_FULL": { "Box": "M2M", "Category": "M2M CAM Prefetch Events", "Desc": "Prefetch CAM Cycles Full", "EvSel": 83, "ExtSel": "", }, "M2M.PREFCAM_CYCLES_NE": { "Box": "M2M", "Category": "M2M CAM Prefetch Events", "Desc": "Prefetch CAM Cycles Not Empty", "EvSel": 84, "ExtSel": "", }, "M2M.PREFCAM_DEMAND_PROMOTIONS": { "Box": "M2M", "Category": "M2M CAM Prefetch Events", "Desc": "Prefetch CAM Demand Promotions", "EvSel": 86, "ExtSel": "", }, "M2M.PREFCAM_INSERTS": { "Box": "M2M", "Category": "M2M CAM Prefetch Events", "Desc": "Prefetch CAM Inserts", "EvSel": 87, "ExtSel": "", }, "M2M.PREFCAM_OCCUPANCY": { "Box": "M2M", "Category": "M2M CAM Prefetch Events", "Desc": "Prefetch CAM Occupancy", "EvSel": 85, "ExtSel": "", }, "M2M.RING_BOUNCES_HORZ": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", }, "M2M.RING_BOUNCES_HORZ.AK": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.RING_BOUNCES_HORZ.BL": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, 
"ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.RING_BOUNCES_HORZ.AD": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.RING_BOUNCES_HORZ.IV": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.RING_BOUNCES_VERT": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", }, "M2M.RING_BOUNCES_VERT.BL": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.RING_BOUNCES_VERT.AK": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.RING_BOUNCES_VERT.AD": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.RING_BOUNCES_VERT.IV": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that 
bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.RING_SINK_STARVED_HORZ": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", }, "M2M.RING_SINK_STARVED_HORZ.IV": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.RING_SINK_STARVED_HORZ.AK_AG1": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.RING_SINK_STARVED_HORZ.BL": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.RING_SINK_STARVED_HORZ.AD": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.RING_SINK_STARVED_HORZ.AK": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.RING_SINK_STARVED_VERT": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", }, "M2M.RING_SINK_STARVED_VERT.IV": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.RING_SINK_STARVED_VERT.BL": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.RING_SINK_STARVED_VERT.AD": { "Box": "M2M", "Category": "M2M Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.RING_SINK_STARVED_VERT.AK": { "Box": "M2M", "Category": "M2M Vertical 
RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.RING_SRC_THRTL": { "Box": "M2M", "Category": "M2M Horizontal RING Events", "Desc": "Source Throttle", "EvSel": 164, "ExtSel": "", }, "M2M.RPQ_CYCLES_REG_CREDITS": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Regular", "EvSel": 67, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", }, "M2M.RPQ_CYCLES_REG_CREDITS.CHN1": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Regular", "EvSel": 67, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxx1x", }, "M2M.RPQ_CYCLES_REG_CREDITS.CHN2": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Regular", "EvSel": 67, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxx1xx", }, "M2M.RPQ_CYCLES_REG_CREDITS.CHN0": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Regular", "EvSel": 67, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxxx1", }, "M2M.RPQ_CYCLES_SPEC_CREDITS": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Special", "EvSel": 68, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", }, "M2M.RPQ_CYCLES_SPEC_CREDITS.CHN2": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Special", "EvSel": 68, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and 
.invert to 1 OR subtract this from total cycles", "Umask": "bxxxxx1xx", }, "M2M.RPQ_CYCLES_SPEC_CREDITS.CHN0": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Special", "EvSel": 68, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxxx1", }, "M2M.RPQ_CYCLES_SPEC_CREDITS.CHN1": { "Box": "M2M", "Category": "M2M RPQ CREDIT Events", "Desc": "M2M to iMC RPQ Cycles w/Credits - Special", "EvSel": 68, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxx1x", }, "M2M.RxC_AD_CYCLES_FULL": { "Box": "M2M", "Category": "M2M AD Ingress Events", "Desc": "AD Ingress (from CMS) Full", "EvSel": 4, "ExtSel": "", }, "M2M.RxC_AD_CYCLES_NE": { "Box": "M2M", "Category": "M2M AD Ingress Events", "Desc": "AD Ingress (from CMS) Not Empty", "EvSel": 3, "ExtSel": "", }, "M2M.RxC_AD_INSERTS": { "Box": "M2M", "Category": "M2M AD Ingress Events", "Desc": "AD Ingress (from CMS) Allocations", "EvSel": 1, "ExtSel": "", }, "M2M.RxC_AD_OCCUPANCY": { "Box": "M2M", "Category": "M2M AD Ingress Events", "Desc": "AD Ingress (from CMS) Occupancy", "EvSel": 2, "ExtSel": "", }, "M2M.RxC_BL_CYCLES_FULL": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "BL Ingress (from CMS) Full", "EvSel": 8, "ExtSel": "", }, "M2M.RxC_BL_CYCLES_NE": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "BL Ingress (from CMS) Not Empty", "EvSel": 7, "ExtSel": "", }, "M2M.RxC_BL_INSERTS": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "BL Ingress (from CMS) Allocations", "EvSel": 5, "ExtSel": "", }, "M2M.RxC_BL_OCCUPANCY": { "Box": "M2M", "Category": "M2M BL Ingress Events", "Desc": "BL Ingress (from CMS) Occupancy", "EvSel": 6, "ExtSel": "", }, "M2M.RxR_BUSY_STARVED": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles 
under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", }, "M2M.RxR_BUSY_STARVED.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "M2M.RxR_BUSY_STARVED.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "M2M.RxR_BUSY_STARVED.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "M2M.RxR_BUSY_STARVED.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "M2M.RxR_BYPASS": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "M2M.RxR_BYPASS.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "M2M.RxR_BYPASS.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "M2M.RxR_BYPASS.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "M2M.RxR_BYPASS.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "M2M.RxR_BYPASS.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "M2M.RxR_BYPASS.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "M2M.RxR_CRD_STARVED": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", }, "M2M.RxR_CRD_STARVED.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "M2M.RxR_CRD_STARVED.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "M2M.RxR_CRD_STARVED.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "M2M.RxR_CRD_STARVED.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "M2M.RxR_CRD_STARVED.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "M2M.RxR_CRD_STARVED.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "M2M.RxR_CRD_STARVED.IFV": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b1xxxxxxx", }, "M2M.RxR_INSERTS": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "M2M.RxR_INSERTS.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "M2M.RxR_INSERTS.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "M2M.RxR_INSERTS.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "M2M.RxR_INSERTS.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "M2M.RxR_INSERTS.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "M2M.RxR_INSERTS.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "M2M.RxR_OCCUPANCY": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "M2M.RxR_OCCUPANCY.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "M2M.RxR_OCCUPANCY.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "M2M.RxR_OCCUPANCY.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "M2M.RxR_OCCUPANCY.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "M2M.RxR_OCCUPANCY.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "M2M.RxR_OCCUPANCY.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No 
AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": 
"Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per 
transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become 
available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1": { "Box": "M2M", "Category": "M2M CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TGR_AD_CREDITS": { "Box": "M2M", "Category": "M2M Transgress/M2MIngress Credit Events", "Desc": "Number AD Ingress Credits", "EvSel": 65, "ExtSel": "", }, "M2M.TGR_BL_CREDITS": { "Box": "M2M", "Category": "M2M Transgress/M2MIngress Credit Events", "Desc": "Number BL Ingress Credits", "EvSel": 66, "ExtSel": "", }, "M2M.TRACKER_CYCLES_FULL": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Full", "EvSel": 69, "ExtSel": "", }, "M2M.TRACKER_CYCLES_FULL.CH1": { "Box": "M2M", 
"Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Full", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TRACKER_CYCLES_FULL.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Full", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TRACKER_CYCLES_FULL.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Full", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TRACKER_CYCLES_NE": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Not Empty", "EvSel": 70, "ExtSel": "", }, "M2M.TRACKER_CYCLES_NE.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Not Empty", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TRACKER_CYCLES_NE.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Not Empty", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TRACKER_CYCLES_NE.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Cycles Not Empty", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TRACKER_INSERTS": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Inserts", "EvSel": 73, "ExtSel": "", }, "M2M.TRACKER_INSERTS.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Inserts", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TRACKER_INSERTS.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Inserts", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TRACKER_INSERTS.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Inserts", "EvSel": 73, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TRACKER_OCCUPANCY": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Occupancy", "EvSel": 71, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", }, "M2M.TRACKER_OCCUPANCY.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Occupancy", "EvSel": 71, "ExtSel": "", 
"Notes": "Tie to Packet Mask/Match?", "Umask": "bxxxxxxx1", }, "M2M.TRACKER_OCCUPANCY.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Occupancy", "EvSel": 71, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bxxxxxx1x", }, "M2M.TRACKER_OCCUPANCY.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Tracker Occupancy", "EvSel": 71, "ExtSel": "", "Notes": "Tie to Packet Mask/Match?", "Umask": "bxxxxx1xx", }, "M2M.TRACKER_PENDING_OCCUPANCY": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Data Pending Occupancy", "EvSel": 72, "ExtSel": "", }, "M2M.TxC_AD_CREDITS_ACQUIRED": { "Box": "M2M", "Category": "M2M AD CMS/Mesh Egress Credit Events", "Desc": "AD Egress (to CMS) Credit Acquired", "EvSel": 13, "ExtSel": "", }, "M2M.TxC_AD_CREDIT_OCCUPANCY": { "Box": "M2M", "Category": "M2M AD CMS/Mesh Egress Credit Events", "Desc": "AD Egress (to CMS) Credits Occupancy", "EvSel": 14, "ExtSel": "", }, "M2M.TxC_AD_CYCLES_FULL": { "Box": "M2M", "Category": "M2M AD Egress Events", "Desc": "AD Egress (to CMS) Full", "EvSel": 12, "ExtSel": "", }, "M2M.TxC_AD_CYCLES_NE": { "Box": "M2M", "Category": "M2M AD Egress Events", "Desc": "AD Egress (to CMS) Not Empty", "EvSel": 11, "ExtSel": "", }, "M2M.TxC_AD_INSERTS": { "Box": "M2M", "Category": "M2M AD Egress Events", "Desc": "AD Egress (to CMS) Allocations", "EvSel": 9, "ExtSel": "", }, "M2M.TxC_AD_NO_CREDIT_CYCLES": { "Box": "M2M", "Category": "M2M AD CMS/Mesh Egress Credit Events", "Desc": "Cycles with No AD Egress (to CMS) Credits", "EvSel": 15, "ExtSel": "", }, "M2M.TxC_AD_NO_CREDIT_STALLED": { "Box": "M2M", "Category": "M2M AD CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No AD Egress (to CMS) Credits", "EvSel": 16, "ExtSel": "", }, "M2M.TxC_AD_OCCUPANCY": { "Box": "M2M", "Category": "M2M AD Egress Events", "Desc": "AD Egress (to CMS) Occupancy", "EvSel": 10, "ExtSel": "", }, "M2M.TxC_AK": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound 
Ring Transactions on AK", "EvSel": 57, "ExtSel": "", }, "M2M.TxC_AK.NDR": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound Ring Transactions on AK", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK.CRD_CBO": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound Ring Transactions on AK", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_CREDITS_ACQUIRED": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "AK Egress (to CMS) Credit Acquired", "EvSel": 29, "ExtSel": "", }, "M2M.TxC_AK_CREDITS_ACQUIRED.CMS1": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "AK Egress (to CMS) Credit Acquired", "EvSel": 29, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_CREDITS_ACQUIRED.CMS0": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "AK Egress (to CMS) Credit Acquired", "EvSel": 29, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_CREDIT_OCCUPANCY": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "AK Egress (to CMS) Credits Occupancy", "EvSel": 30, "ExtSel": "", }, "M2M.TxC_AK_CREDIT_OCCUPANCY.CMS1": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "AK Egress (to CMS) Credits Occupancy", "EvSel": 30, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_CREDIT_OCCUPANCY.CMS0": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "AK Egress (to CMS) Credits Occupancy", "EvSel": 30, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_CYCLES_FULL": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", }, "M2M.TxC_AK_CYCLES_FULL.RDCRD0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 
20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b0xxx1xxx", }, "M2M.TxC_AK_CYCLES_FULL.CMS1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_CYCLES_FULL.WRCMP1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b1x1xxxxx", }, "M2M.TxC_AK_CYCLES_FULL.CMS0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_CYCLES_FULL.WRCRD1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b1xx1xxxx", }, "M2M.TxC_AK_CYCLES_FULL.ALL": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "bxxxxxx11", }, "M2M.TxC_AK_CYCLES_FULL.RDCRD1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) 
Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b1xxx1xxx", }, "M2M.TxC_AK_CYCLES_FULL.WRCMP0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b0x1xxxxx", }, "M2M.TxC_AK_CYCLES_FULL.WRCRD0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Full", "EvSel": 20, "ExtSel": "", "Notes": "Some extra wild guesses as to what the subevents count - and why does it seem to track 0 credits for each CMS agent, but the other related events don't?", "Umask": "b0xx1xxxx", }, "M2M.TxC_AK_CYCLES_NE": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", }, "M2M.TxC_AK_CYCLES_NE.ALL": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx11", }, "M2M.TxC_AK_CYCLES_NE.WRCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxC_AK_CYCLES_NE.WRCMP": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxC_AK_CYCLES_NE.CMS0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_CYCLES_NE.RDCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxC_AK_CYCLES_NE.CMS1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": 
"AK Egress (to CMS) Not Empty", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_INSERTS": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", }, "M2M.TxC_AK_INSERTS.CMS0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_INSERTS.ALL": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxx11", }, "M2M.TxC_AK_INSERTS.PREF_RD_CAM_HIT": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxC_AK_INSERTS.WRCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxC_AK_INSERTS.WRCMP": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxC_AK_INSERTS.CMS1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_INSERTS.RDCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Allocations", "EvSel": 17, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxC_AK_NO_CREDIT_CYCLES": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles with No AK Egress (to CMS) Credits", "EvSel": 31, "ExtSel": "", }, "M2M.TxC_AK_NO_CREDIT_CYCLES.CMS1": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles with No AK Egress (to CMS) Credits", "EvSel": 31, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_NO_CREDIT_CYCLES.CMS0": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles with No AK Egress (to CMS) 
Credits", "EvSel": 31, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_NO_CREDIT_STALLED": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No AK Egress (to CMS) Credits", "EvSel": 32, "ExtSel": "", }, "M2M.TxC_AK_NO_CREDIT_STALLED.CMS0": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No AK Egress (to CMS) Credits", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_NO_CREDIT_STALLED.CMS1": { "Box": "M2M", "Category": "M2M AK CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No AK Egress (to CMS) Credits", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_OCCUPANCY": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", }, "M2M.TxC_AK_OCCUPANCY.RDCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxC_AK_OCCUPANCY.CMS1": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_OCCUPANCY.ALL": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx11", }, "M2M.TxC_AK_OCCUPANCY.WRCRD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxC_AK_OCCUPANCY.WRCMP": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxC_AK_OCCUPANCY.CMS0": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Occupancy", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_AK_SIDEBAND": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Sideband", "EvSel": 
107, "ExtSel": "", }, "M2M.TxC_AK_SIDEBAND.WR": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Sideband", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_AK_SIDEBAND.RD": { "Box": "M2M", "Category": "M2M AK Egress Events", "Desc": "AK Egress (to CMS) Sideband", "EvSel": 107, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 64, "ExtSel": "", }, "M2M.TxC_BL.DRS_CORE": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL.DRS_CACHE": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL.DRS_UPI": { "Box": "M2M", "Category": "M2M OUTBOUND_TX Events", "Desc": "Outbound DRS Ring Transactions to Cache", "EvSel": 64, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxC_BL_CREDITS_ACQUIRED": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "BL Egress (to CMS) Credit Acquired", "EvSel": 25, "ExtSel": "", }, "M2M.TxC_BL_CREDITS_ACQUIRED.CMS0": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "BL Egress (to CMS) Credit Acquired", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_CREDITS_ACQUIRED.CMS1": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "BL Egress (to CMS) Credit Acquired", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_CREDIT_OCCUPANCY": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "BL Egress (to CMS) Credits Occupancy", "EvSel": 26, "ExtSel": "", }, "M2M.TxC_BL_CREDIT_OCCUPANCY.CMS1": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "BL Egress (to CMS) Credits Occupancy", "EvSel": 26, "ExtSel": "", "Umask": 
"bxxxxxx1x", }, "M2M.TxC_BL_CREDIT_OCCUPANCY.CMS0": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "BL Egress (to CMS) Credits Occupancy", "EvSel": 26, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_CYCLES_FULL": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Full", "EvSel": 24, "ExtSel": "", }, "M2M.TxC_BL_CYCLES_FULL.CMS1": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Full", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_CYCLES_FULL.CMS0": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Full", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_CYCLES_FULL.ALL": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Full", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxxx11", }, "M2M.TxC_BL_CYCLES_NE": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Not Empty", "EvSel": 23, "ExtSel": "", }, "M2M.TxC_BL_CYCLES_NE.CMS1": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Not Empty", "EvSel": 23, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_CYCLES_NE.ALL": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Not Empty", "EvSel": 23, "ExtSel": "", "Umask": "bxxxxxx11", }, "M2M.TxC_BL_CYCLES_NE.CMS0": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Not Empty", "EvSel": 23, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_INSERTS": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Allocations", "EvSel": 21, "ExtSel": "", }, "M2M.TxC_BL_INSERTS.CMS1": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_INSERTS.CMS0": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Allocations", "EvSel": 21, "ExtSel": 
"", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_INSERTS.ALL": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Allocations", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxx11", }, "M2M.TxC_BL_NO_CREDIT_CYCLES": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles with No BL Egress (to CMS) Credits", "EvSel": 27, "ExtSel": "", }, "M2M.TxC_BL_NO_CREDIT_CYCLES.CMS0": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles with No BL Egress (to CMS) Credits", "EvSel": 27, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_NO_CREDIT_CYCLES.CMS1": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles with No BL Egress (to CMS) Credits", "EvSel": 27, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_NO_CREDIT_STALLED": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No BL Egress (to CMS) Credits", "EvSel": 28, "ExtSel": "", }, "M2M.TxC_BL_NO_CREDIT_STALLED.CMS1": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No BL Egress (to CMS) Credits", "EvSel": 28, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_NO_CREDIT_STALLED.CMS0": { "Box": "M2M", "Category": "M2M BL CMS/Mesh Egress Credit Events", "Desc": "Cycles Stalled with No BL Egress (to CMS) Credits", "EvSel": 28, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_OCCUPANCY": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Occupancy", "EvSel": 22, "ExtSel": "", }, "M2M.TxC_BL_OCCUPANCY.CMS1": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Occupancy", "EvSel": 22, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxC_BL_OCCUPANCY.CMS0": { "Box": "M2M", "Category": "M2M BL Egress Events", "Desc": "BL Egress (to CMS) Occupancy", "EvSel": 22, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxC_BL_OCCUPANCY.ALL": { "Box": "M2M", "Category": "M2M BL Egress Events", 
"Desc": "BL Egress (to CMS) Occupancy", "EvSel": 22, "ExtSel": "", "Umask": "bxxxxxx11", }, "M2M.TxR_HORZ_ADS_USED": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", }, "M2M.TxR_HORZ_ADS_USED.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_HORZ_ADS_USED.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_HORZ_ADS_USED.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_HORZ_ADS_USED.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_HORZ_ADS_USED.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_HORZ_BYPASS": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type 
and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", }, "M2M.TxR_HORZ_BYPASS.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_HORZ_BYPASS.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_HORZ_BYPASS.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_HORZ_BYPASS.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_HORZ_BYPASS.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_HORZ_BYPASS.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_HORZ_CYCLES_FULL": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", }, "M2M.TxR_HORZ_CYCLES_FULL.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_HORZ_CYCLES_FULL.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_HORZ_CYCLES_FULL.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_HORZ_CYCLES_FULL.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_HORZ_CYCLES_FULL.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_HORZ_CYCLES_FULL.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_HORZ_CYCLES_NE": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", }, "M2M.TxR_HORZ_CYCLES_NE.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_HORZ_CYCLES_NE.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_HORZ_CYCLES_NE.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_HORZ_CYCLES_NE.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_HORZ_CYCLES_NE.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_HORZ_CYCLES_NE.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_HORZ_INSERTS": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", }, "M2M.TxR_HORZ_INSERTS.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_HORZ_INSERTS.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_HORZ_INSERTS.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_HORZ_INSERTS.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxxx1", }, 
"M2M.TxR_HORZ_INSERTS.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_HORZ_INSERTS.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_HORZ_NACK": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", }, "M2M.TxR_HORZ_NACK.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_HORZ_NACK.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_HORZ_NACK.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_HORZ_NACK.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bx1xxxxxx", }, 
"M2M.TxR_HORZ_NACK.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_HORZ_NACK.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_HORZ_OCCUPANCY": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", }, "M2M.TxR_HORZ_OCCUPANCY.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_HORZ_OCCUPANCY.BL_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_HORZ_OCCUPANCY.AD_CRD": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_HORZ_OCCUPANCY.AK_BNC": { "Box": "M2M", "Category": "M2M CMS 
Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_HORZ_OCCUPANCY.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_HORZ_OCCUPANCY.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_HORZ_STARVED": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", }, "M2M.TxR_HORZ_STARVED.AK_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_HORZ_STARVED.AD_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_HORZ_STARVED.IV_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_HORZ_STARVED.BL_BNC": { "Box": "M2M", "Category": "M2M CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_ADS_USED": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", }, "M2M.TxR_VERT_ADS_USED.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_ADS_USED.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_ADS_USED.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock 
Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_ADS_USED.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_ADS_USED.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_ADS_USED.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_BYPASS": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", }, "M2M.TxR_VERT_BYPASS.IV": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_BYPASS.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_BYPASS.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": 
"CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_BYPASS.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_BYPASS.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_BYPASS.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_BYPASS.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_CYCLES_FULL": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", }, "M2M.TxR_VERT_CYCLES_FULL.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_CYCLES_FULL.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_CYCLES_FULL.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_CYCLES_FULL.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_CYCLES_FULL.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_CYCLES_FULL.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_CYCLES_FULL.IV": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_CYCLES_NE": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", }, "M2M.TxR_VERT_CYCLES_NE.IV": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_CYCLES_NE.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_CYCLES_NE.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_CYCLES_NE.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_CYCLES_NE.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_CYCLES_NE.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_CYCLES_NE.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_INSERTS": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", }, "M2M.TxR_VERT_INSERTS.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_INSERTS.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_INSERTS.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_INSERTS.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_INSERTS.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_INSERTS.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_INSERTS.IV": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_NACK": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", }, "M2M.TxR_VERT_NACK.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_NACK.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_NACK.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_NACK.IV": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_NACK.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": 
"bxx1xxxxx", }, "M2M.TxR_VERT_NACK.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_NACK.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_OCCUPANCY": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", }, "M2M.TxR_VERT_OCCUPANCY.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_OCCUPANCY.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_OCCUPANCY.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_OCCUPANCY.IV": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress 
buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.TxR_VERT_OCCUPANCY.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_OCCUPANCY.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_OCCUPANCY.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_STARVED": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", }, "M2M.TxR_VERT_STARVED.BL_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.TxR_VERT_STARVED.AD_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.TxR_VERT_STARVED.BL_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bx1xxxxxx", }, "M2M.TxR_VERT_STARVED.AD_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxx1xxxx", }, "M2M.TxR_VERT_STARVED.AK_AG0": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.TxR_VERT_STARVED.AK_AG1": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxx1xxxxx", }, "M2M.TxR_VERT_STARVED.IV": { "Box": "M2M", "Category": "M2M CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.VERT_RING_AD_IN_USE": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", }, "M2M.VERT_RING_AD_IN_USE.UP_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.VERT_RING_AD_IN_USE.DN_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.VERT_RING_AD_IN_USE.UP_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. 
On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.VERT_RING_AD_IN_USE.DN_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.VERT_RING_AK_IN_USE": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", }, "M2M.VERT_RING_AK_IN_USE.UP_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.VERT_RING_AK_IN_USE.DN_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.VERT_RING_AK_IN_USE.DN_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.VERT_RING_AK_IN_USE.UP_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.VERT_RING_BL_IN_USE": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", }, "M2M.VERT_RING_BL_IN_USE.DN_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxx1xxx", }, "M2M.VERT_RING_BL_IN_USE.UP_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.VERT_RING_BL_IN_USE.DN_EVEN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.VERT_RING_BL_IN_USE.UP_ODD": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.VERT_RING_IV_IN_USE": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 172, "ExtSel": "", }, "M2M.VERT_RING_IV_IN_USE.DN": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.VERT_RING_IV_IN_USE.UP": { "Box": "M2M", "Category": "M2M Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WPQ_CYCLES_REG_CREDITS": { "Box": "M2M", "Category": "M2M WPQ_CREDITS Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Regular", "EvSel": 77, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", }, "M2M.WPQ_CYCLES_REG_CREDITS.CHN0": { "Box": "M2M", "Category": "M2M WPQ_CREDITS Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Regular", "EvSel": 77, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxxx1", }, "M2M.WPQ_CYCLES_REG_CREDITS.CHN2": { "Box": "M2M", "Category": "M2M WPQ_CREDITS Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Regular", "EvSel": 77, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxx1xx", }, "M2M.WPQ_CYCLES_REG_CREDITS.CHN1": { "Box": 
"M2M", "Category": "M2M WPQ_CREDITS Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Regular", "EvSel": 77, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxx1x", }, "M2M.WPQ_CYCLES_SPEC_CREDITS": { "Box": "M2M", "Category": "M2M WPQ_CREDITS Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Special", "EvSel": 78, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", }, "M2M.WPQ_CYCLES_SPEC_CREDITS.CHN0": { "Box": "M2M", "Category": "M2M WPQ_CREDITS Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Special", "EvSel": 78, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxxx1", }, "M2M.WPQ_CYCLES_SPEC_CREDITS.CHN2": { "Box": "M2M", "Category": "M2M WPQ_CREDITS Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Special", "EvSel": 78, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxx1xx", }, "M2M.WPQ_CYCLES_SPEC_CREDITS.CHN1": { "Box": "M2M", "Category": "M2M WPQ_CREDITS Events", "Desc": "M2M->iMC WPQ Cycles w/Credits - Special", "EvSel": 78, "ExtSel": "", "Notes": "To Count # cycles w/o credits, either set .thresh to 1 and .invert to 1 OR subtract this from total cycles", "Umask": "bxxxxxx1x", }, "M2M.WRITE_TRACKER_CYCLES_FULL": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Cycles Full", "EvSel": 74, "ExtSel": "", }, "M2M.WRITE_TRACKER_CYCLES_FULL.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Cycles Full", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WRITE_TRACKER_CYCLES_FULL.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Cycles Full", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxx1xx", }, 
"M2M.WRITE_TRACKER_CYCLES_FULL.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Cycles Full", "EvSel": 74, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WRITE_TRACKER_CYCLES_NE": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", }, "M2M.WRITE_TRACKER_CYCLES_NE.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WRITE_TRACKER_CYCLES_NE.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WRITE_TRACKER_CYCLES_NE.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Cycles Not Empty", "EvSel": 75, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WRITE_TRACKER_INSERTS": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Inserts", "EvSel": 97, "ExtSel": "", }, "M2M.WRITE_TRACKER_INSERTS.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Inserts", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxxx1x", }, "M2M.WRITE_TRACKER_INSERTS.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Inserts", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxx1xx", }, "M2M.WRITE_TRACKER_INSERTS.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Inserts", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WRITE_TRACKER_OCCUPANCY": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Occupancy", "EvSel": 96, "ExtSel": "", }, "M2M.WRITE_TRACKER_OCCUPANCY.CH0": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxxx1", }, "M2M.WRITE_TRACKER_OCCUPANCY.CH1": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxxx1x", 
}, "M2M.WRITE_TRACKER_OCCUPANCY.CH2": { "Box": "M2M", "Category": "M2M TRACKER Events", "Desc": "Write Tracker Occupancy", "EvSel": 96, "ExtSel": "", "Umask": "bxxxxx1xx", }, # PCU: "PCU.CLOCKTICKS": { "Box": "PCU", "Category": "PCU MEMORY_PHASE_SHEDDING Events", "Counters": "0-3", "Defn": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.", "Desc": "pclk Cycles", "EvSel": 0, "ExtSel": "", }, "PCU.CORE_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "EvSel": 96, "ExtSel": "", }, "PCU.CTS_EVENT0": { "Box": "PCU", "Category": "PCU Misc Events", "EvSel": 17, "ExtSel": "", }, "PCU.CTS_EVENT1": { "Box": "PCU", "Category": "PCU Misc Events", "EvSel": 18, "ExtSel": "", }, "PCU.DEMOTIONS": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "EvSel": 48, "ExtSel": "", }, "PCU.FIVR_PS_PS0_CYCLES": { "Box": "PCU", "Category": "PCU FIVR Events", "Counters": "0-3", "Defn": "Cycles spent in phase-shedding power state 0", "Desc": "Phase Shed 0 Cycles", "EvSel": 117, "ExtSel": "", }, "PCU.FIVR_PS_PS1_CYCLES": { "Box": "PCU", "Category": "PCU FIVR Events", "Counters": "0-3", "Defn": "Cycles spent in phase-shedding power state 1", "Desc": "Phase Shed 1 Cycles", "EvSel": 118, "ExtSel": "", }, "PCU.FIVR_PS_PS2_CYCLES": { "Box": "PCU", "Category": "PCU FIVR Events", "Counters": "0-3", "Defn": "Cycles spent in phase-shedding power state 2", "Desc": "Phase Shed 2 Cycles", "EvSel": 119, "ExtSel": "", }, "PCU.FIVR_PS_PS3_CYCLES": { "Box": "PCU", "Category": "PCU FIVR Events", "Counters": "0-3", "Defn": "Cycles spent in phase-shedding power state 3", "Desc": "Phase Shed 3 Cycles", "EvSel": 120, "ExtSel": "", }, "PCU.FREQ_MAX_LIMIT_THERMAL_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": 
"Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.", "Desc": "Thermal Strongest Upper Limit Cycles", "EvSel": 4, "ExtSel": "", }, "PCU.FREQ_MAX_POWER_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when power is the upper limit on frequency.", "Desc": "Power Strongest Upper Limit Cycles", "EvSel": 5, "ExtSel": "", }, "PCU.FREQ_MIN_IO_P_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MIN_LIMIT Events", "Counters": "0-3", "Defn": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.", "Desc": "IO P Limit Strongest Lower Limit Cycles", "EvSel": 115, "ExtSel": "", }, "PCU.FREQ_TRANS_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_TRANS Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. 
One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.", "Desc": "Cycles spent changing Frequency", "EvSel": 116, "ExtSel": "", }, "PCU.MCP_PROCHOT_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "EvSel": 6, "ExtSel": "", }, "PCU.MEMORY_PHASE_SHEDDING_CYCLES": { "Box": "PCU", "Category": "PCU MEMORY_PHASE_SHEDDING Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.", "Desc": "Memory Phase Shedding Cycles", "EvSel": 47, "ExtSel": "", "Notes": "Package C1", }, "PCU.PKG_RESIDENCY_C0_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C0", "EvSel": 42, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C2E_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C2E", "EvSel": 43, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C3_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). 
Residency events do not include transition times.", "Desc": "Package C State Residency - C3", "EvSel": 44, "ExtSel": "", }, "PCU.PKG_RESIDENCY_C6_CYCLES": { "Box": "PCU", "Category": "PCU PKG_C_STATE_RESIDENCY Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.", "Desc": "Package C State Residency - C6", "EvSel": 45, "ExtSel": "", }, "PCU.PMAX_THROTTLED_CYCLES": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "EvSel": 7, "ExtSel": "", }, "PCU.PROCHOT_EXTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.", "Desc": "External Prochot", "EvSel": 10, "ExtSel": "", }, "PCU.PROCHOT_INTERNAL_CYCLES": { "Box": "PCU", "Category": "PCU PROCHOT Events", "Counters": "0-3", "Defn": "Counts the number of cycles that we are in Interal PROCHOT mode. 
This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.", "Desc": "Internal Prochot", "EvSel": 9, "ExtSel": "", }, "PCU.TOTAL_TRANSITION_CYCLES": { "Box": "PCU", "Category": "PCU CORE_C_STATE_TRANSITION Events", "Counters": "0-3", "Defn": "Number of cycles spent performing core C state transitions across all cores.", "Desc": "Total Core C State Transition Cycles", "EvSel": 114, "ExtSel": "", }, "PCU.VR_HOT_CYCLES": { "Box": "PCU", "Category": "PCU VR_HOT Events", "Counters": "0-3", "Desc": "VR Hot", "EvSel": 66, "ExtSel": "", }, # IRP: "IRP.CACHE_TOTAL_OCCUPANCY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 15, "ExtSel": "", }, "IRP.CACHE_TOTAL_OCCUPANCY.ANY": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 15, "ExtSel": "", "Umask": "b00000001", }, "IRP.CACHE_TOTAL_OCCUPANCY.IV_Q": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 15, "ExtSel": "", "Umask": "b00000010", }, "IRP.CACHE_TOTAL_OCCUPANCY.MEM": { "Box": "IRP", "Category": "IRP WRITE_CACHE Events", "Counters": "0-1", "Defn": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. 
This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.", "Desc": "Total Write Cache Occupancy", "EvSel": 15, "ExtSel": "", "Umask": "b00000100", }, "IRP.CLOCKTICKS": { "Box": "IRP", "Category": "IRP CLOCK Events", "Desc": "IRP Clocks", "EvSel": 1, "ExtSel": "", }, "IRP.COHERENT_OPS": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", }, "IRP.COHERENT_OPS.PCIRDCUR": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IRP.COHERENT_OPS.PCIDCAHINT": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IRP.COHERENT_OPS.RFO": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IRP.COHERENT_OPS.PCITOM": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IRP.COHERENT_OPS.WBMTOI": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "Umask": "bx1xxxxxx", }, "IRP.COHERENT_OPS.DRD": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, 
"ExtSel": "", "Umask": "bxxxxx1xx", }, "IRP.COHERENT_OPS.CLFLUSH": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "Umask": "b1xxxxxxx", }, "IRP.COHERENT_OPS.CRD": { "Box": "IRP", "Category": "IRP Coherency Events", "Counters": "0-1", "Defn": "Counts the number of coherency related operations servied by the IRP", "Desc": "Coherent Ops", "EvSel": 16, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IRP.FAF_FULL": { "Box": "IRP", "Category": "IRP FAF Events", "Desc": "FAF RF full", "EvSel": 23, "ExtSel": "", }, "IRP.FAF_INSERTS": { "Box": "IRP", "Category": "IRP FAF Events", "Desc": "FAF - request insert from TC.", "EvSel": 24, "ExtSel": "", }, "IRP.FAF_OCCUPANCY": { "Box": "IRP", "Category": "IRP FAF Events", "Desc": "FAF occupancy", "EvSel": 25, "ExtSel": "", }, "IRP.FAF_TRANSACTIONS": { "Box": "IRP", "Category": "IRP FAF Events", "Desc": "FAF allocation -- sent to ADQ", "EvSel": 22, "ExtSel": "", }, "IRP.IRP_ALL": { "Box": "IRP", "Category": "IRP IRP Buffer Events", "EvSel": 30, "ExtSel": "", }, "IRP.IRP_ALL.OUTBOUND_INSERTS": { "Box": "IRP", "Category": "IRP IRP Buffer Events", "EvSel": 30, "ExtSel": "", "Umask": "b00000010", }, "IRP.IRP_ALL.INBOUND_INSERTS": { "Box": "IRP", "Category": "IRP IRP Buffer Events", "EvSel": 30, "ExtSel": "", "Umask": "b00000001", }, "IRP.MISC0": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 28, "ExtSel": "", }, "IRP.MISC0.UNKNOWN": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 28, "ExtSel": "", "Umask": "b1xx00000", }, "IRP.MISC0.FAST_REQ": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 28, "ExtSel": "", "Umask": "b000000x1", }, "IRP.MISC0.2ND_RD_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": 
"0-1", "Desc": "Misc Events - Set 0", "EvSel": 28, "ExtSel": "", "Umask": "bx00xx100", }, "IRP.MISC0.FAST_REJ": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 28, "ExtSel": "", "Umask": "b0000001x", }, "IRP.MISC0.2ND_WR_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 28, "ExtSel": "", "Umask": "bx00x1x00", }, "IRP.MISC0.FAST_XFER": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 28, "ExtSel": "", "Umask": "bxx100000", }, "IRP.MISC0.2ND_ATOMIC_INSERT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 28, "ExtSel": "", "Umask": "bx001xx00", }, "IRP.MISC0.PF_ACK_HINT": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 0", "EvSel": 28, "ExtSel": "", "Umask": "bx1x00000", }, "IRP.MISC1": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 29, "ExtSel": "", }, "IRP.MISC1.SEC_RCVD_VLD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 29, "ExtSel": "", "Umask": "bx1xx0000", }, "IRP.MISC1.SLOW_M": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 29, "ExtSel": "", "Umask": "b000x1xxx", }, "IRP.MISC1.LOST_FWD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 29, "ExtSel": "", "Umask": "b0001xxxx", }, "IRP.MISC1.SEC_RCVD_INVLD": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 29, "ExtSel": "", "Umask": "bxx1x0000", }, "IRP.MISC1.SLOW_E": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 29, "ExtSel": "", "Umask": "b000xx1xx", }, "IRP.MISC1.SLOW_S": { "Box": "IRP", 
"Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 29, "ExtSel": "", "Umask": "b000xxx1x", }, "IRP.MISC1.SLOW_I": { "Box": "IRP", "Category": "IRP MISC Events", "Counters": "0-1", "Desc": "Misc Events - Set 1", "EvSel": 29, "ExtSel": "", "Umask": "b000xxxx1", }, "IRP.P2P_INSERTS": { "Box": "IRP", "Category": "IRP P2P Events", "Defn": "P2P requests from the ITC", "Desc": "P2P Requests", "EvSel": 20, "ExtSel": "", }, "IRP.P2P_OCCUPANCY": { "Box": "IRP", "Category": "IRP P2P Events", "Defn": "P2P B & S Queue Occupancy", "Desc": "P2P Occupancy", "EvSel": 21, "ExtSel": "", }, "IRP.P2P_TRANSACTIONS": { "Box": "IRP", "Category": "IRP P2P Events", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", }, "IRP.P2P_TRANSACTIONS.RD": { "Box": "IRP", "Category": "IRP P2P Events", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IRP.P2P_TRANSACTIONS.WR": { "Box": "IRP", "Category": "IRP P2P Events", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IRP.P2P_TRANSACTIONS.REM": { "Box": "IRP", "Category": "IRP P2P Events", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IRP.P2P_TRANSACTIONS.REM_AND_TGT_MATCH": { "Box": "IRP", "Category": "IRP P2P Events", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IRP.P2P_TRANSACTIONS.LOC_AND_TGT_MATCH": { "Box": "IRP", "Category": "IRP P2P Events", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Umask": "b1xxxxxxx", }, "IRP.P2P_TRANSACTIONS.LOC": { "Box": "IRP", "Category": "IRP P2P Events", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Umask": "bx1xxxxxx", }, "IRP.P2P_TRANSACTIONS.CMPL": { "Box": "IRP", "Category": "IRP P2P Events", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IRP.P2P_TRANSACTIONS.MSG": { "Box": "IRP", "Category": "IRP P2P Events", "Desc": "P2P Transactions", "EvSel": 19, "ExtSel": "", "Umask": 
"bxxxxx1xx", }, "IRP.SNOOP_RESP": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", }, "IRP.SNOOP_RESP.MISS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxxxxx1", }, "IRP.SNOOP_RESP.SNPINV": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bx1xxxxxx", }, "IRP.SNOOP_RESP.HIT_ES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. 
Unsure which combinations are possible.", "Umask": "bxxxxx1xx", }, "IRP.SNOOP_RESP.SNPCODE": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxx1xxxx", }, "IRP.SNOOP_RESP.HIT_I": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxxxx1x", }, "IRP.SNOOP_RESP.SNPDATA": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxx1xxxxx", }, "IRP.SNOOP_RESP.HIT_M": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Desc": "Snoop Responses", "EvSel": 18, "ExtSel": "", "Notes": "The first 4 subevent bits are the Responses to the Code/Data/Invalid Snoops represented by the last 3 subevent bits. At least 1 of the bottom 4 bits must be combined with 1 of the top 3 bits to obtain counts. Unsure which combinations are possible.", "Umask": "bxxxx1xxx", }, "IRP.TRANSACTIONS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. 
This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", }, "IRP.TRANSACTIONS.WR_PREF": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxx1xxx", }, "IRP.TRANSACTIONS.READS": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxxxx1", }, "IRP.TRANSACTIONS.ATOMIC": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. 
If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxx1xxxx", }, "IRP.TRANSACTIONS.WRITES": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxxx1x", }, "IRP.TRANSACTIONS.RD_PREF": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxxxxx1xx", }, "IRP.TRANSACTIONS.OTHER": { "Box": "IRP", "Category": "IRP TRANSACTIONS Events", "Counters": "0-1", "Defn": "Counts the number of \"Inbound\" transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. 
If the SOURCE bit is set, then we also do AND qualification based on the source portID.", "Desc": "Inbound Transaction Count", "EvSel": 17, "ExtSel": "", "Notes": "Bit 7 is a filter that can be applied to the other subevents. Meaningless by itself.", "Umask": "bxx1xxxxx", }, "IRP.TxC_AK_INSERTS": { "Box": "IRP", "Category": "IRP AK Egress Events", "Counters": "0-1", "Desc": "AK Egress Allocations", "EvSel": 11, "ExtSel": "", }, "IRP.TxC_BL_DRS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL DRS Egress Cycles Full", "EvSel": 5, "ExtSel": "", }, "IRP.TxC_BL_DRS_INSERTS": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL DRS Egress Inserts", "EvSel": 2, "ExtSel": "", }, "IRP.TxC_BL_DRS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL DRS Egress Occupancy", "EvSel": 8, "ExtSel": "", }, "IRP.TxC_BL_NCB_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCB Egress Cycles Full", "EvSel": 6, "ExtSel": "", }, "IRP.TxC_BL_NCB_INSERTS": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCB Egress Inserts", "EvSel": 3, "ExtSel": "", }, "IRP.TxC_BL_NCB_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCB Egress Occupancy", "EvSel": 9, "ExtSel": "", }, "IRP.TxC_BL_NCS_CYCLES_FULL": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCS Egress Cycles Full", "EvSel": 7, "ExtSel": "", }, "IRP.TxC_BL_NCS_INSERTS": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCS Egress Inserts", "EvSel": 4, "ExtSel": "", }, "IRP.TxC_BL_NCS_OCCUPANCY": { "Box": "IRP", "Category": "IRP BL Egress Events", "Counters": "0-1", "Desc": "BL NCS Egress Occupancy", "EvSel": 10, "ExtSel": "", }, "IRP.TxR2_AD_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", 
"Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.", "Desc": "No AD Egress Credit Stalls", "EvSel": 26, "ExtSel": "", }, "IRP.TxR2_BL_STALL_CREDIT_CYCLES": { "Box": "IRP", "Category": "IRP STALL_CYCLES Events", "Counters": "0-1", "Defn": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.", "Desc": "No BL Egress Credit Stalls", "EvSel": 27, "ExtSel": "", }, "IRP.TxS_DATA_INSERTS_NCB": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 13, "ExtSel": "", }, "IRP.TxS_DATA_INSERTS_NCS": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Counts the number of requests issued to the switch (towards the devices).", "Desc": "Outbound Read Requests", "EvSel": 14, "ExtSel": "", }, "IRP.TxS_REQUEST_OCCUPANCY": { "Box": "IRP", "Category": "IRP OUTBOUND_REQUESTS Events", "Counters": "0-1", "Defn": "Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). 
This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.", "Desc": "Outbound Request Queue Occupancy", "EvSel": 12, "ExtSel": "", }, # IIO: "IIO.CLOCKTICKS": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Desc": "Traffic Controller Clocks", "EvSel": 1, "ExtSel": "", }, "IIO.COMP_BUF_INSERTS": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "ExtSel": "", }, "IIO.COMP_BUF_INSERTS.PORT3": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxx1xxx", "Umask": "bxxxxx1xx", }, "IIO.COMP_BUF_INSERTS.PORT0": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxxxxx1", "Umask": "bxxxxx1xx", }, "IIO.COMP_BUF_INSERTS.PORT2": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxxx1xx", "Umask": "bxxxxx1xx", }, "IIO.COMP_BUF_INSERTS.PORT1": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Desc": "PCIe Completion Buffer Inserts", "EvSel": 194, "FCMask": "b111", "ExtSel": "", "PortMask": "bxxxxxx1x", "Umask": "bxxxxx1xx", }, "IIO.COMP_BUF_OCCUPANCY": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "ExtSel": "", }, "IIO.COMP_BUF_OCCUPANCY.PORT2": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "ExtSel": "", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxxxx", }, "IIO.COMP_BUF_OCCUPANCY.PORT1": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "ExtSel": 
"", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxxxx", }, "IIO.COMP_BUF_OCCUPANCY.PORT0": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "ExtSel": "", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxxxx", }, "IIO.COMP_BUF_OCCUPANCY.PORT3": { "Box": "IIO", "Category": "IIO PCIe Completion Buffer Events", "Desc": "PCIe Completion Buffer Occupancy", "EvSel": 213, "ExtSel": "", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxxxx", }, "IIO.DATA_REQ_BY_CPU": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.MEM_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.CFG_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.MEM_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_BY_CPU.IO_READ.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.CFG_READ.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.PEER_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.PEER_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "b1xxxxxxx", }, "IIO.DATA_REQ_BY_CPU.IO_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "2-3", "Defn": "Number of double word (4 bytes) requests initiated by the main die to the attached device.", "Desc": "Data requested by the CPU", "EvSel": 192, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_OF_CPU": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.MSG.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.ATOMICCMP.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxxx1xx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxx1xxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.ATOMICCMP.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxx1xxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.MSG.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1x", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_READ.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxx1xx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.MSG.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxx1xxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.ATOMICCMP.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1x", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_OF_CPU.MSG.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.MSG.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxx1x", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxx", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.ATOMIC.PART1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxxxx1x", "Umask": "bxxx1xxxx", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.ATOMICCMP.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxx1xxx", "Umask": "bxx1xxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.PEER_READ.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxx1xx", "Umask": "bxxxx1xxx", }, "IIO.DATA_REQ_OF_CPU.MSG.VTD0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxx1xxxx", "Umask": "bx1xxxxxx", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART3": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxx1x", }, "IIO.DATA_REQ_OF_CPU.MEM_WRITE.PART0": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. Expect to change back on next prodcut.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxxx1", }, "IIO.DATA_REQ_OF_CPU.PEER_WRITE.PART2": { "Box": "IIO", "Category": "IIO Payload Events", "Counters": "0-1", "Defn": "Number of double word (4 bytes) requests the attached device made of the main die.", "Desc": "Data requested of the CPU", "EvSel": 131, "FCMask": "b111", "ExtSel": "", "Notes": "Multiply value by 4 to convert to number of Bytes. Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic. Counts are incremented on the request path rather than the completion path. 
Expect to change back on next prodcut.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxx1x", }, "IIO.LINK_NUM_CORR_ERR": { "Box": "IIO", "Category": "IIO Link Events", "Desc": "Num Link Correctable Errors", "EvSel": 15, "ExtSel": "", }, "IIO.LINK_NUM_RETRIES": { "Box": "IIO", "Category": "IIO Link Events", "Desc": "Num Link Retries", "EvSel": 14, "ExtSel": "", }, "IIO.MASK_MATCH": { "Box": "IIO", "Category": "IIO Miscellaneous Events", "Desc": "Number packets that passed the Mask/Match Filter", "EvSel": 33, "ExtSel": "", }, "IIO.MASK_MATCH_AND": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", }, "IIO.MASK_MATCH_AND.BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IIO.MASK_MATCH_AND.NOT_BUS0_NOT_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IIO.MASK_MATCH_AND.BUS0_NOT_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IIO.MASK_MATCH_AND.NOT_BUS0_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IIO.MASK_MATCH_AND.BUS0": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, 
"IIO.MASK_MATCH_AND.BUS0_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if all bits specified by mask match", "Desc": "AND Mask/match for debug bus", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IIO.MASK_MATCH_OR": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", }, "IIO.MASK_MATCH_OR.BUS0_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxxxx1xxx", }, "IIO.MASK_MATCH_OR.BUS0": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxxxx1", }, "IIO.MASK_MATCH_OR.NOT_BUS0_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxxx1xxxx", }, "IIO.MASK_MATCH_OR.NOT_BUS0_NOT_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxx1xxxxx", }, "IIO.MASK_MATCH_OR.BUS0_NOT_BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxx1xx", }, "IIO.MASK_MATCH_OR.BUS1": { "Box": "IIO", "Category": "IIO Debug Events", "Counters": "0-3", "Defn": "Asserted if any bits specified by mask match", "Desc": "OR Mask/match for debug bus", "EvSel": 3, "ExtSel": "", "Umask": "bxxxxxx1x", }, "IIO.NOTHING": { "Box": "IIO", "Category": "IIO CLOCK Events", "EvSel": 
0, "ExtSel": "", }, "IIO.SYMBOL_TIMES": { "Box": "IIO", "Category": "IIO Miscellaneous Events", "Defn": "Gen1 - increment once every 4nS, Gen2 - increment once every 2nS, Gen3 - increment once every 1nS", "Desc": "Symbol Times on Link", "EvSel": 130, "ExtSel": "", }, "IIO.TXN_REQ_BY_CPU": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.IO_READ.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_BY_CPU.CFG_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.MEM_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.CFG_READ.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.PEER_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. 
Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_BY_CPU.PEER_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_BY_CPU.IO_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_BY_CPU.IO_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.", "Desc": "Number Transactions requested by the CPU", "EvSel": 193, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "b1xxxxxxx", }, "IIO.TXN_REQ_OF_CPU": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", }, "IIO.TXN_REQ_OF_CPU.MSG.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.ATOMICCMP.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_OF_CPU.MSG.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.MSG.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.MSG.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.ATOMICCMP.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.ATOMIC.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bx1xxxxxx", "Umask": "bxxx1xxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.MSG.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_READ.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxxxxx1xx", }, "IIO.TXN_REQ_OF_CPU.PEER_WRITE.PART1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxx1x", "Umask": "bxxxxxx1x", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. 
Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.ATOMICCMP.PART2": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxx1xx", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_OF_CPU.MEM_WRITE.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxxxxx1", }, "IIO.TXN_REQ_OF_CPU.MSG.VTD1": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxx1xxxxx", "Umask": "bx1xxxxxx", }, "IIO.TXN_REQ_OF_CPU.ATOMICCMP.PART0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. 
Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxxxxx1", "Umask": "bxx1xxxxx", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.PART3": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxxx1xxx", "Umask": "bxxxx1xxx", }, "IIO.TXN_REQ_OF_CPU.PEER_READ.VTD0": { "Box": "IIO", "Category": "IIO Transaction Events", "Counters": "0-3", "Defn": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.", "Desc": "Number Transactions requested of the CPU", "EvSel": 132, "FCMask": "b111", "ExtSel": "", "Notes": "Unlike free running counters, Mem Read and Peer read subevents count requests not completions. Peer R/W subevents do not include confined P2P traffic.", "PortMask": "bxxx1xxxx", "Umask": "bxxxx1xxx", }, "IIO.VTD_ACCESS": { "Box": "IIO", "Category": "IIO VTd Events", "Counters": "0-3", "Desc": "VTd Access", "EvSel": 65, "ExtSel": "", "Notes": ".L4_PAGE_HIT + L4_MISS == # VT-D Lookups? .CTXT_MISS + .L1_MISS + .L2_MISS + .L3_MISS + .L4_MISS == # Page Walker Reads Issued by VT-d?", }, "IIO.VTD_ACCESS.L1_MISS": { "Box": "IIO", "Category": "IIO VTd Events", "Counters": "0-3", "Desc": "VTd Access", "EvSel": 65, "ExtSel": "", "Notes": ".L4_PAGE_HIT + L4_MISS == # VT-D Lookups? 
.CTXT_MISS + .L1_MISS + .L2_MISS + .L3_MISS + .L4_MISS == # Page Walker Reads Issued by VT-d?", "Umask": "bxxxxx1xx", }, "IIO.VTD_ACCESS.TLB_FULL": { "Box": "IIO", "Category": "IIO VTd Events", "Counters": "0-3", "Desc": "VTd Access", "EvSel": 65, "ExtSel": "", "Notes": ".L4_PAGE_HIT + L4_MISS == # VT-D Lookups? .CTXT_MISS + .L1_MISS + .L2_MISS + .L3_MISS + .L4_MISS == # Page Walker Reads Issued by VT-d?", "Umask": "bx1xxxxxx", }, "IIO.VTD_ACCESS.L2_MISS": { "Box": "IIO", "Category": "IIO VTd Events", "Counters": "0-3", "Desc": "VTd Access", "EvSel": 65, "ExtSel": "", "Notes": ".L4_PAGE_HIT + L4_MISS == # VT-D Lookups? .CTXT_MISS + .L1_MISS + .L2_MISS + .L3_MISS + .L4_MISS == # Page Walker Reads Issued by VT-d?", "Umask": "bxxxx1xxx", }, "IIO.VTD_ACCESS.L4_PAGE_HIT": { "Box": "IIO", "Category": "IIO VTd Events", "Counters": "0-3", "Desc": "VTd Access", "EvSel": 65, "ExtSel": "", "Notes": ".L4_PAGE_HIT + L4_MISS == # VT-D Lookups? .CTXT_MISS + .L1_MISS + .L2_MISS + .L3_MISS + .L4_MISS == # Page Walker Reads Issued by VT-d?", "Umask": "bxxxxxxx1", }, "IIO.VTD_ACCESS.L3_MISS": { "Box": "IIO", "Category": "IIO VTd Events", "Counters": "0-3", "Desc": "VTd Access", "EvSel": 65, "ExtSel": "", "Notes": ".L4_PAGE_HIT + L4_MISS == # VT-D Lookups? .CTXT_MISS + .L1_MISS + .L2_MISS + .L3_MISS + .L4_MISS == # Page Walker Reads Issued by VT-d?", "Umask": "bxxx1xxxx", }, "IIO.VTD_ACCESS.TLB1_MISS": { "Box": "IIO", "Category": "IIO VTd Events", "Counters": "0-3", "Desc": "VTd Access", "EvSel": 65, "ExtSel": "", "Notes": ".L4_PAGE_HIT + L4_MISS == # VT-D Lookups? .CTXT_MISS + .L1_MISS + .L2_MISS + .L3_MISS + .L4_MISS == # Page Walker Reads Issued by VT-d?", "Umask": "b1xxxxxxx", }, "IIO.VTD_ACCESS.TLB_MISS": { "Box": "IIO", "Category": "IIO VTd Events", "Counters": "0-3", "Desc": "VTd Access", "EvSel": 65, "ExtSel": "", "Notes": ".L4_PAGE_HIT + L4_MISS == # VT-D Lookups? 
.CTXT_MISS + .L1_MISS + .L2_MISS + .L3_MISS + .L4_MISS == # Page Walker Reads Issued by VT-d?", "Umask": "bxx1xxxxx", }, "IIO.VTD_ACCESS.CTXT_MISS": { "Box": "IIO", "Category": "IIO VTd Events", "Counters": "0-3", "Desc": "VTd Access", "EvSel": 65, "ExtSel": "", "Notes": ".L4_PAGE_HIT + L4_MISS == # VT-D Lookups? .CTXT_MISS + .L1_MISS + .L2_MISS + .L3_MISS + .L4_MISS == # Page Walker Reads Issued by VT-d?", "Umask": "bxxxxxx1x", }, "IIO.VTD_OCCUPANCY": { "Box": "IIO", "Category": "IIO VTd Events", "Counters": "0-3", "Desc": "VTd Occupancy", "EvSel": 64, "ExtSel": "", }, # CHA: "CHA.AG0_AD_CRD_ACQUIRED": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "CHA.AG0_AD_CRD_ACQUIRED.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "CHA.AG0_AD_CRD_ACQUIRED.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "CHA.AG0_AD_CRD_ACQUIRED.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, 
"CHA.AG0_AD_CRD_ACQUIRED.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "CHA.AG0_AD_CRD_ACQUIRED.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "CHA.AG0_AD_CRD_ACQUIRED.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 AD Credits Acquired", "EvSel": 128, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "CHA.AG0_AD_CRD_OCCUPANCY": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "CHA.AG0_AD_CRD_OCCUPANCY.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxx1x", }, "CHA.AG0_AD_CRD_OCCUPANCY.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, 
"ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxx1xxxxx", }, "CHA.AG0_AD_CRD_OCCUPANCY.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxx1xx", }, "CHA.AG0_AD_CRD_OCCUPANCY.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxxx1", }, "CHA.AG0_AD_CRD_OCCUPANCY.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxx1xxx", }, "CHA.AG0_AD_CRD_OCCUPANCY.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 AD Credits Occupancy", "EvSel": 130, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxx1xxxx", }, "CHA.AG0_BL_CRD_ACQUIRED": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", }, "CHA.AG0_BL_CRD_ACQUIRED.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL 
Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.AG0_BL_CRD_ACQUIRED.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.AG0_BL_CRD_ACQUIRED.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.AG0_BL_CRD_ACQUIRED.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.AG0_BL_CRD_ACQUIRED.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.AG0_BL_CRD_ACQUIRED.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent0 BL Credits Acquired", "EvSel": 136, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.AG0_BL_CRD_OCCUPANCY": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", }, "CHA.AG0_BL_CRD_OCCUPANCY.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxxxx1x", }, 
"CHA.AG0_BL_CRD_OCCUPANCY.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.AG0_BL_CRD_OCCUPANCY.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.AG0_BL_CRD_OCCUPANCY.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.AG0_BL_CRD_OCCUPANCY.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.AG0_BL_CRD_OCCUPANCY.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent0 BL Credits Occupancy", "EvSel": 138, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.AG1_AD_CRD_ACQUIRED": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", }, "CHA.AG1_AD_CRD_ACQUIRED.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are 
selected, will count the OR of all selected", "Umask": "bxxxxxx1x", }, "CHA.AG1_AD_CRD_ACQUIRED.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxx1xxxxx", }, "CHA.AG1_AD_CRD_ACQUIRED.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxx1xx", }, "CHA.AG1_AD_CRD_ACQUIRED.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxxxxx1", }, "CHA.AG1_AD_CRD_ACQUIRED.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxx1xxxx", }, "CHA.AG1_AD_CRD_ACQUIRED.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 AD Credits Acquired", "EvSel": 132, "ExtSel": "", "Notes": "If multiple masks are selected, will count the OR of all selected", "Umask": "bxxxx1xxx", }, "CHA.AG1_AD_CRD_OCCUPANCY": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a 
given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", }, "CHA.AG1_AD_CRD_OCCUPANCY.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxx1xxxxx", }, "CHA.AG1_AD_CRD_OCCUPANCY.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxx1xx", }, "CHA.AG1_AD_CRD_OCCUPANCY.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxx1x", }, "CHA.AG1_AD_CRD_OCCUPANCY.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxx1xxxx", }, "CHA.AG1_AD_CRD_OCCUPANCY.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxx1xxx", }, "CHA.AG1_AD_CRD_OCCUPANCY.TGR0": { 
"Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 AD Credits Occupancy", "EvSel": 134, "ExtSel": "", "Notes": "If multiple masks are selected, will count the SUM of all selected", "Umask": "bxxxxxxx1", }, "CHA.AG1_BL_CRD_OCCUPANCY": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", }, "CHA.AG1_BL_CRD_OCCUPANCY.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.AG1_BL_CRD_OCCUPANCY.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.AG1_BL_CRD_OCCUPANCY.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.AG1_BL_CRD_OCCUPANCY.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.AG1_BL_CRD_OCCUPANCY.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.AG1_BL_CRD_OCCUPANCY.TGR1": { "Box": 
"CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress", "Desc": "CMS Agent1 BL Credits Occupancy", "EvSel": 142, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.AG1_BL_CREDITS_ACQUIRED": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", }, "CHA.AG1_BL_CREDITS_ACQUIRED.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.AG1_BL_CREDITS_ACQUIRED.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.AG1_BL_CREDITS_ACQUIRED.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.AG1_BL_CREDITS_ACQUIRED.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.AG1_BL_CREDITS_ACQUIRED.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.AG1_BL_CREDITS_ACQUIRED.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", 
"Defn": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.", "Desc": "CMS Agent1 BL Credits Acquired", "EvSel": 140, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.BYPASS_CHA_IMC": { "Box": "CHA", "Category": "CHA HA BYPASS Events", "Defn": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.", "Desc": "CHA to iMC Bypass", "EvSel": 87, "ExtSel": "", }, "CHA.BYPASS_CHA_IMC.NOT_TAKEN": { "Box": "CHA", "Category": "CHA HA BYPASS Events", "Defn": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.", "Desc": "CHA to iMC Bypass", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.BYPASS_CHA_IMC.INTERMEDIATE": { "Box": "CHA", "Category": "CHA HA BYPASS Events", "Defn": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.", "Desc": "CHA to iMC Bypass", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.BYPASS_CHA_IMC.TAKEN": { "Box": "CHA", "Category": "CHA HA BYPASS Events", "Defn": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. 
This can be filtered by when the bypass was taken and when it was not.", "Desc": "CHA to iMC Bypass", "EvSel": 87, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.CLOCKTICKS": { "Box": "CHA", "Category": "CHA UCLK Events", "Counters": "0-3", "Desc": "Uncore Clocks", "EvSel": 0, "ExtSel": "", }, "CHA.CMS_CLOCKTICKS": { "Box": "CHA", "Category": "CHA Misc Events", "Desc": "CMS Clockticks", "EvSel": 192, "ExtSel": "", }, "CHA.CORE_PMA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Core PMA Events", "EvSel": 23, "ExtSel": "", }, "CHA.CORE_PMA.C1_STATE": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Core PMA Events", "EvSel": 23, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.CORE_PMA.C6_TRANSITION": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Core PMA Events", "EvSel": 23, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.CORE_PMA.GV": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Core PMA Events", "EvSel": 23, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.CORE_PMA.C1_TRANSITION": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Core PMA Events", "EvSel": 23, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.CORE_PMA.C6_STATE": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Counters": "0-3", "Desc": "Core PMA Events", "EvSel": 23, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.CORE_SNP": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. 
If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", }, "CHA.CORE_SNP.CORE_GTONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b01000010", }, "CHA.CORE_SNP.EVICT_REMOTE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b10000100", }, "CHA.CORE_SNP.CORE_REMOTE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. 
Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b01000100", }, "CHA.CORE_SNP.EXT_REMOTE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b00100100", }, "CHA.CORE_SNP.EXT_GTONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. 
This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b00100010", }, "CHA.CORE_SNP.ANY_REMOTE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b11100100", }, "CHA.CORE_SNP.EXT_ONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b00100001", }, "CHA.CORE_SNP.CORE_ONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. 
For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b01000001", }, "CHA.CORE_SNP.EVICT_GTONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b10000010", }, "CHA.CORE_SNP.ANY_ONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. 
This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b11100001", }, "CHA.CORE_SNP.ANY_GTONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b11100010", }, "CHA.CORE_SNP.EVICT_ONE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Defn": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).", "Desc": "Core Cross Snoops Issued", "EvSel": 51, "ExtSel": "", "Umask": "b10000001", }, "CHA.COUNTER0_OCCUPANCY": { "Box": "CHA", "Category": "CHA OCCUPANCY Events", "Counters": "0-3", "Defn": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. 
The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.", "Desc": "Counter 0 Occupancy", "EvSel": 31, "ExtSel": "", }, "CHA.DIR_LOOKUP": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of transactions that looked up the Home Agent directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 83, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", }, "CHA.DIR_LOOKUP.NO_SNP": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of transactions that looked up the Home Agent directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 83, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxx1x", }, "CHA.DIR_LOOKUP.SNP": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of transactions that looked up the Home Agent directory. Can be filtered by requests that had to snoop and those that did not have to.", "Desc": "Directory Lookups", "EvSel": 83, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory", "Umask": "bxxxxxxx1", }, "CHA.DIR_UPDATE": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller.", "Desc": "Directory Updates", "EvSel": 84, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory. Note that any directory update that's a part of an explicit (eviction or core/IO) or an implicit (snoop response) writeback is not counted.", }, "CHA.DIR_UPDATE.TOR": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of directory updates that were required. 
These result in writes to the memory controller.", "Desc": "Directory Updates", "EvSel": 84, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory. Note that any directory update that's a part of an explicit (eviction or core/IO) or an implicit (snoop response) writeback is not counted.", "Umask": "bxxxxxx1x", }, "CHA.DIR_UPDATE.HA": { "Box": "CHA", "Category": "CHA HA DIRECTORY Events", "Defn": "Counts the number of directory updates that were required. These result in writes to the memory controller.", "Desc": "Directory Updates", "EvSel": 84, "ExtSel": "", "Notes": "Only valid for parts that implement the Directory. Note that any directory update that's a part of an explicit (eviction or core/IO) or an implicit (snoop response) writeback is not counted.", "Umask": "bxxxxxxx1", }, "CHA.EGRESS_ORDERING": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 174, "ExtSel": "", }, "CHA.EGRESS_ORDERING.IV_SNOOPGO_DN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 174, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.EGRESS_ORDERING.IV_SNOOPGO_UP": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements", "Desc": "Egress Blocking due to Ordering requirements", "EvSel": 174, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.FAST_ASSERTED": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. 
Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 165, "ExtSel": "", }, "CHA.FAST_ASSERTED.HORZ": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 165, "ExtSel": "", "Umask": "b00000010", }, "CHA.FAST_ASSERTED.VERT": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.", "Desc": "FaST wire asserted", "EvSel": 165, "ExtSel": "", "Umask": "b00000001", }, "CHA.HITME_HIT": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 95, "ExtSel": "", }, "CHA.HITME_HIT.EX_RDS": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 95, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HITME_HIT.WBMTOI_OR_S": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 95, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.HITME_HIT.SHARED_OWNREQ": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 95, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HITME_HIT.WBMTOE": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Hits in HitMe Cache", "EvSel": 95, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.HITME_LOOKUP": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 94, "ExtSel": "", }, "CHA.HITME_LOOKUP.READ": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HITME_LOOKUP.WRITE": { "Box": "CHA", "Category": "CHA HA 
HitME Events", "Desc": "Counts Number of times HitMe Cache is accessed", "EvSel": 94, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HITME_MISS": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Misses in HitMe Cache", "EvSel": 96, "ExtSel": "", }, "CHA.HITME_MISS.READ_OR_INV": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Misses in HitMe Cache", "EvSel": 96, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.HITME_MISS.SHARED_RDINVOWN": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Misses in HitMe Cache", "EvSel": 96, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.HITME_MISS.NOTSHARED_RDINVOWN": { "Box": "CHA", "Category": "CHA HA HitME Events", "Desc": "Counts Number of Misses in HitMe Cache", "EvSel": 96, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.HITME_UPDATE": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", }, "CHA.HITME_UPDATE.DEALLOCATE": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.HITME_UPDATE.SHARED": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HITME_UPDATE.RSPFWDI_REM": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HITME_UPDATE.RDINVOWN": { "Box": "CHA", "Category": "CHA HA HitME Pipe Events", "Desc": "Counts the number of Allocate/Update to HitMe Cache", "EvSel": 97, "ExtSel": "", 
"Umask": "bxxxx1xxx", }, "CHA.HORZ_RING_AD_IN_USE": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", }, "CHA.HORZ_RING_AD_IN_USE.LEFT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HORZ_RING_AD_IN_USE.RIGHT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.HORZ_RING_AD_IN_USE.RIGHT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HORZ_RING_AD_IN_USE.LEFT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AD Ring In Use", "EvSel": 167, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HORZ_RING_AK_IN_USE": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", }, "CHA.HORZ_RING_AK_IN_USE.RIGHT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HORZ_RING_AK_IN_USE.LEFT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HORZ_RING_AK_IN_USE.LEFT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HORZ_RING_AK_IN_USE.RIGHT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal AK Ring In Use", "EvSel": 169, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.HORZ_RING_BL_IN_USE": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", }, "CHA.HORZ_RING_BL_IN_USE.LEFT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.HORZ_RING_BL_IN_USE.RIGHT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HORZ_RING_BL_IN_USE.RIGHT_ODD": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.HORZ_RING_BL_IN_USE.LEFT_EVEN": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Horizontal BL Ring in Use", "EvSel": 171, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.HORZ_RING_IV_IN_USE": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 173, "ExtSel": "", }, "CHA.HORZ_RING_IV_IN_USE.RIGHT": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.HORZ_RING_IV_IN_USE.LEFT": { "Box": "CHA", "Category": "CHA Horizontal In Use RING Events", "Defn": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Horizontal IV Ring in Use", "EvSel": 173, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.IMC_READS_COUNT": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Reads Issued", "EvSel": 89, "ExtSel": "", "Notes": "To match the number of reads seen at the IMC, it's necessary to account for any bypasses. IMC_READS_COUNT.* + BYPASS_CHA_IMC.TAKEN == CAS_COUNT.RD", }, "CHA.IMC_READS_COUNT.PRIORITY": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Reads Issued", "EvSel": 89, "ExtSel": "", "Notes": "To match the number of reads seen at the IMC, it's necessary to account for any bypasses. 
IMC_READS_COUNT.* + BYPASS_CHA_IMC.TAKEN == CAS_COUNT.RD", "Umask": "bxxxxxx1x", }, "CHA.IMC_READS_COUNT.NORMAL": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.", "Desc": "HA to iMC Reads Issued", "EvSel": 89, "ExtSel": "", "Notes": "To match the number of reads seen at the IMC, it's necessary to account for any bypasses. IMC_READS_COUNT.* + BYPASS_CHA_IMC.TAKEN == CAS_COUNT.RD", "Umask": "bxxxxxxx1", }, "CHA.IMC_WRITES_COUNT": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "Writes Issued to the iMC by the HA", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", }, "CHA.IMC_WRITES_COUNT.FULL_PRIORITY": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "Writes Issued to the iMC by the HA", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", "Umask": "bxxxxx1xx", }, "CHA.IMC_WRITES_COUNT.PARTIAL_MIG": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "Writes Issued to the iMC by the HA", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. 
Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", "Umask": "bxx1xxxxx", }, "CHA.IMC_WRITES_COUNT.FULL_MIG": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "Writes Issued to the iMC by the HA", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", "Umask": "bxxx1xxxx", }, "CHA.IMC_WRITES_COUNT.FULL": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "Writes Issued to the iMC by the HA", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", "Umask": "bxxxxxxx1", }, "CHA.IMC_WRITES_COUNT.PARTIAL_PRIORITY": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "Writes Issued to the iMC by the HA", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", "Umask": "bxxxx1xxx", }, "CHA.IMC_WRITES_COUNT.PARTIAL": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. 
It can be filtered by full/partial and ISOCH/non-ISOCH.", "Desc": "Writes Issued to the iMC by the HA", "EvSel": 91, "ExtSel": "", "Notes": "Directory bits are stored in memory. Remote socket RFOs will result in a directory update which, in turn, will cause a write command.", "Umask": "bxxxxxx1x", }, "CHA.IODC_ALLOC": { "Box": "CHA", "Category": "CHA HA IODC Events", "Desc": "Counts Number of times IODC entry allocation is attempted", "EvSel": 98, "ExtSel": "", }, "CHA.IODC_ALLOC.IODCFULL": { "Box": "CHA", "Category": "CHA HA IODC Events", "Desc": "Counts Number of times IODC entry allocation is attempted", "EvSel": 98, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.IODC_ALLOC.OSBGATED": { "Box": "CHA", "Category": "CHA HA IODC Events", "Desc": "Counts Number of times IODC entry allocation is attempted", "EvSel": 98, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.IODC_ALLOC.INVITOM": { "Box": "CHA", "Category": "CHA HA IODC Events", "Desc": "Counts Number of times IODC entry allocation is attempted", "EvSel": 98, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.IODC_DEALLOC": { "Box": "CHA", "Category": "CHA HA IODC Events", "Desc": "Counts number of IODC deallocations", "EvSel": 99, "ExtSel": "", }, "CHA.IODC_DEALLOC.WBMTOI": { "Box": "CHA", "Category": "CHA HA IODC Events", "Desc": "Counts number of IODC deallocations", "EvSel": 99, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.IODC_DEALLOC.WBPUSHMTOI": { "Box": "CHA", "Category": "CHA HA IODC Events", "Desc": "Counts number of IODC deallocations", "EvSel": 99, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.IODC_DEALLOC.SNPOUT": { "Box": "CHA", "Category": "CHA HA IODC Events", "Desc": "Counts number of IODC deallocations", "EvSel": 99, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.IODC_DEALLOC.ALL": { "Box": "CHA", "Category": "CHA HA IODC Events", "Desc": "Counts number of IODC deallocations", "EvSel": 99, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.IODC_DEALLOC.WBMTOE": { "Box": "CHA", "Category": "CHA HA IODC Events", "Desc": 
"Counts number of IODC deallocations", "EvSel": 99, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.LLC_LOOKUP": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.", "Desc": "Cache and Snoop Filter Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match against a given state (or states) as programmed in the Cn_MSR_PMON_BOX_FILTER0.state field bitmask. 0 = I (miss), 4 = S, 5 = E, 6 = M, 7 = F. For example, if you wanted to monitor F and S hits, you could set 00001001b in the 8-bit state field. To monitor any lookup, set the field to 0x1F. Extra note - it may be a little confusing for customers of earlier products. With the CBo and HA functionality combined, it's possible to also measure Snoop Filter lookups with bits 1-3 of the FILTER0.state field", }, "CHA.LLC_LOOKUP.REMOTE_SNOOP": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.", "Desc": "Cache and Snoop Filter Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. 
This allows us to match against a given state (or states) as programmed in the Cn_MSR_PMON_BOX_FILTER0.state field bitmask. 0 = I (miss), 4 = S, 5 = E, 6 = M, 7 = F. For example, if you wanted to monitor F and S hits, you could set 00001001b in the 8-bit state field. To monitor any lookup, set the field to 0x1F. Extra note - it may be a little confusing for customers of earlier products. With the CBo and HA functionality combined, it's possible to also measure Snoop Filter lookups with bits 1-3 of the FILTER0.state field", "Umask": "b00001001", }, "CHA.LLC_LOOKUP.LOCAL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.", "Desc": "Cache and Snoop Filter Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match against a given state (or states) as programmed in the Cn_MSR_PMON_BOX_FILTER0.state field bitmask. 0 = I (miss), 4 = S, 5 = E, 6 = M, 7 = F. For example, if you wanted to monitor F and S hits, you could set 00001001b in the 8-bit state field. To monitor any lookup, set the field to 0x1F. Extra note - it may be a little confusing for customers of earlier products. 
With the CBo and HA functionality combined, it's possible to also measure Snoop Filter lookups with bits 1-3 of the FILTER0.state field", "Umask": "b00110001", }, "CHA.LLC_LOOKUP.REMOTE": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.", "Desc": "Cache and Snoop Filter Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match against a given state (or states) as programmed in the Cn_MSR_PMON_BOX_FILTER0.state field bitmask. 0 = I (miss), 4 = S, 5 = E, 6 = M, 7 = F. For example, if you wanted to monitor F and S hits, you could set 00001001b in the 8-bit state field. To monitor any lookup, set the field to 0x1F. Extra note - it may be a little confusing for customers of earlier products. With the CBo and HA functionality combined, it's possible to also measure Snoop Filter lookups with bits 1-3 of the FILTER0.state field", "Umask": "b10010001", }, "CHA.LLC_LOOKUP.DATA_READ": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. 
CHAFilter0[24:21,17] bits correspond to [FMESI] state.", "Desc": "Cache and Snoop Filter Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match against a given state (or states) as programmed in the Cn_MSR_PMON_BOX_FILTER0.state field bitmask. 0 = I (miss), 4 = S, 5 = E, 6 = M, 7 = F. For example, if you wanted to monitor F and S hits, you could set 00001001b in the 8-bit state field. To monitor any lookup, set the field to 0x1F. Extra note - it may be a little confusing for customers of earlier products. With the CBo and HA functionality combined, it's possible to also measure Snoop Filter lookups with bits 1-3 of the FILTER0.state field", "Umask": "b00000011", }, "CHA.LLC_LOOKUP.ANY": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.", "Desc": "Cache and Snoop Filter Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match against a given state (or states) as programmed in the Cn_MSR_PMON_BOX_FILTER0.state field bitmask. 0 = I (miss), 4 = S, 5 = E, 6 = M, 7 = F. For example, if you wanted to monitor F and S hits, you could set 00001001b in the 8-bit state field. To monitor any lookup, set the field to 0x1F. Extra note - it may be a little confusing for customers of earlier products. 
With the CBo and HA functionality combined, it's possible to also measure Snoop Filter lookups with bits 1-3 of the FILTER0.state field", "Umask": "b00010001", }, "CHA.LLC_LOOKUP.WRITE": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.", "Desc": "Cache and Snoop Filter Lookups", "EvSel": 52, "ExtSel": "", "Notes": "Bit 0 of the umask must always be set for this event. This allows us to match against a given state (or states) as programmed in the Cn_MSR_PMON_BOX_FILTER0.state field bitmask. 0 = I (miss), 4 = S, 5 = E, 6 = M, 7 = F. For example, if you wanted to monitor F and S hits, you could set 00001001b in the 8-bit state field. To monitor any lookup, set the field to 0x1F. Extra note - it may be a little confusing for customers of earlier products. With the CBo and HA functionality combined, it's possible to also measure Snoop Filter lookups with bits 1-3 of the FILTER0.state field", "Umask": "b00000101", }, "CHA.LLC_VICTIMS": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", }, "CHA.LLC_VICTIMS.LOCAL_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00101000", }, "CHA.LLC_VICTIMS.LOCAL_M": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00100001", }, "CHA.LLC_VICTIMS.REMOTE_S": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b10000100", }, "CHA.LLC_VICTIMS.LOCAL_E": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00100010", }, "CHA.LLC_VICTIMS.LOCAL_S": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00100100", }, "CHA.LLC_VICTIMS.REMOTE_M": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b10000001", }, "CHA.LLC_VICTIMS.TOTAL_E": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b10100010", }, "CHA.LLC_VICTIMS.REMOTE_ALL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b10001111", }, "CHA.LLC_VICTIMS.TOTAL_M": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b10100001", }, "CHA.LLC_VICTIMS.TOTAL_S": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b10100100", }, "CHA.LLC_VICTIMS.REMOTE_E": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b10000010", }, "CHA.LLC_VICTIMS.LOCAL_ALL": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b00101111", }, "CHA.LLC_VICTIMS.TOTAL_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b10101000", }, "CHA.LLC_VICTIMS.REMOTE_F": { "Box": "CHA", "Category": "CHA CACHE Events", "Counters": "0-3", "Defn": "Counts the number of lines that were victimized on a fill. 
This can be filtered by the state that the line was in.", "Desc": "Lines Victimized", "EvSel": 55, "ExtSel": "", "Notes": "Does not include evict cleans", "Umask": "b10001000", }, "CHA.MISC": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", }, "CHA.MISC.CV0_PREF_MISS": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.MISC.WC_ALIASING": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.MISC.RFO_HIT_S": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.MISC.RSPI_WAS_FSE": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.MISC.CV0_PREF_VIC": { "Box": "CHA", "Category": "CHA MISC Events", "Counters": "0-3", "Defn": "Miscellaneous events in the Cbo.", "Desc": "Cbo Misc", "EvSel": 57, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.OSB": { "Box": "CHA", "Category": "CHA HA OSB Events", "Defn": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.", "Desc": "OSB Snoop Broadcast", "EvSel": 85, "ExtSel": "", }, "CHA.READ_NO_CREDITS": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. 
In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", }, "CHA.READ_NO_CREDITS.EDC1_SMI3": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.READ_NO_CREDITS.EDC3_SMI5": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.READ_NO_CREDITS.MC0_SMI0": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.READ_NO_CREDITS.EDC0_SMI2": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. 
In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.READ_NO_CREDITS.EDC2_SMI4": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.READ_NO_CREDITS.MC1_SMI1": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.", "Desc": "CHA iMC CHNx READ Credits Empty", "EvSel": 88, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.REQUESTS": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 80, "ExtSel": "", }, "CHA.REQUESTS.INVITOE_LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.REQUESTS.READS": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "b00000011", }, "CHA.REQUESTS.READS_REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.REQUESTS.WRITES_REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.REQUESTS.READS_LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.REQUESTS.WRITES": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "b00001100", }, "CHA.REQUESTS.WRITES_LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). 
Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.REQUESTS.INVITOE_REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Counters": "0-3", "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).", "Desc": "Read and Write Requests", "EvSel": 80, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RING_BOUNCES_HORZ": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", }, "CHA.RING_BOUNCES_HORZ.AK": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RING_BOUNCES_HORZ.BL": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RING_BOUNCES_HORZ.AD": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RING_BOUNCES_HORZ.IV": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Defn": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Horizontal Ring.", "EvSel": 161, "ExtSel": "", "Umask": "bxxxx1xxx", 
}, "CHA.RING_BOUNCES_VERT": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", }, "CHA.RING_BOUNCES_VERT.IV": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RING_BOUNCES_VERT.BL": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RING_BOUNCES_VERT.AD": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RING_BOUNCES_VERT.AK": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Defn": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.", "Desc": "Messages that bounced on the Vertical Ring.", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RING_SINK_STARVED_HORZ": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", }, "CHA.RING_SINK_STARVED_HORZ.AK": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RING_SINK_STARVED_HORZ.BL": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxxx1xx", }, 
"CHA.RING_SINK_STARVED_HORZ.AD": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RING_SINK_STARVED_HORZ.IV": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RING_SINK_STARVED_HORZ.AK_AG1": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Sink Starvation on Horizontal Ring", "EvSel": 163, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RING_SINK_STARVED_VERT": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", }, "CHA.RING_SINK_STARVED_VERT.IV": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RING_SINK_STARVED_VERT.AK": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RING_SINK_STARVED_VERT.BL": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RING_SINK_STARVED_VERT.AD": { "Box": "CHA", "Category": "CHA Vertical RING Events", "Desc": "Sink Starvation on Vertical Ring", "EvSel": 162, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RING_SRC_THRTL": { "Box": "CHA", "Category": "CHA Horizontal RING Events", "Desc": "Source Throttle", "EvSel": 164, "ExtSel": "", }, "CHA.RxC_INSERTS": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", }, "CHA.RxC_INSERTS.RRQ": { "Box": "CHA", "Category": "CHA INGRESS 
Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bx1xxxxxx", }, "CHA.RxC_INSERTS.IRQ_REJ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxx1x", }, "CHA.RxC_INSERTS.IPQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxx1xx", }, "CHA.RxC_INSERTS.WBQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "b1xxxxxxx", }, "CHA.RxC_INSERTS.IRQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxxxxxx1", }, "CHA.RxC_INSERTS.PRQ_REJ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxx1xxxxx", }, "CHA.RxC_INSERTS.PRQ": { "Box": 
"CHA", "Category": "CHA INGRESS Events", "Counters": "0-3", "Defn": "Counts number of allocations per cycle into the specified Ingress queue.", "Desc": "Ingress (from CMS) Allocations", "EvSel": 19, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "bxxx1xxxx", }, "CHA.RxC_IPQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 34, "ExtSel": "", }, "CHA.RxC_IPQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_IPQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 34, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_IPQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 34, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_IPQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 34, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_IPQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 34, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_IPQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_IPQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 34, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_IPQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 34, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_IPQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress 
Probe Queue Rejects", "EvSel": 35, "ExtSel": "", }, "CHA.RxC_IPQ1_REJECT.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_IPQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_IPQ1_REJECT.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 35, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_IPQ1_REJECT.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 35, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_IPQ1_REJECT.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 35, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_IPQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 35, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_IPQ1_REJECT.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 35, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_IPQ1_REJECT.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress Probe Queue Rejects", "EvSel": 35, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_IRQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 24, "ExtSel": "", }, "CHA.RxC_IRQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_IRQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 24, 
"ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_IRQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 24, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_IRQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_IRQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 24, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_IRQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 24, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_IRQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 24, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_IRQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 24, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_IRQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 25, "ExtSel": "", }, "CHA.RxC_IRQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_IRQ1_REJECT.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 25, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_IRQ1_REJECT.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 25, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_IRQ1_REJECT.SF_VICTIM": { "Box": "CHA", "Category": "CHA 
INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 25, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_IRQ1_REJECT.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 25, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_IRQ1_REJECT.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_IRQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 25, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_IRQ1_REJECT.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 25, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_ISMQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 36, "ExtSel": "", }, "CHA.RxC_ISMQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 36, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_ISMQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 36, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_ISMQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 36, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_ISMQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 36, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_ISMQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 36, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_ISMQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 36, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_ISMQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 36, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_ISMQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 36, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_ISMQ0_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 44, "ExtSel": "", }, "CHA.RxC_ISMQ0_RETRY.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 44, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_ISMQ0_RETRY.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 44, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_ISMQ0_RETRY.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 44, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_ISMQ0_RETRY.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 44, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_ISMQ0_RETRY.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_ISMQ0_RETRY.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 44, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_ISMQ0_RETRY.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_ISMQ0_RETRY.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 44, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_ISMQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. 
Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 37, "ExtSel": "", }, "CHA.RxC_ISMQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_ISMQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Rejects", "EvSel": 37, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_ISMQ1_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 45, "ExtSel": "", }, "CHA.RxC_ISMQ1_RETRY.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_ISMQ1_RETRY.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the ISMQ had to retry. 
Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.", "Desc": "ISMQ Retries", "EvSel": 45, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_OCCUPANCY": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress (from CMS) Occupancy", "EvSel": 17, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", }, "CHA.RxC_OCCUPANCY.WBQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress (from CMS) Occupancy", "EvSel": 17, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "b10000000", }, "CHA.RxC_OCCUPANCY.IRQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress (from CMS) Occupancy", "EvSel": 17, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "b00000001", }, "CHA.RxC_OCCUPANCY.IPQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress (from CMS) Occupancy", "EvSel": 17, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "b00000100", }, "CHA.RxC_OCCUPANCY.RRQ": { "Box": "CHA", "Category": "CHA INGRESS Events", "Counters": 0, "Defn": "Counts number of entries in the specified Ingress queue in each cycle.", "Desc": "Ingress (from CMS) Occupancy", "EvSel": 17, "ExtSel": "", "Notes": "IRQ_REJECTED should not be Ored with the other umasks.", "Umask": "b01000000", }, "CHA.RxC_OTHER0_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue 
Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 46, "ExtSel": "", }, "CHA.RxC_OTHER0_RETRY.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 46, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_OTHER0_RETRY.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_OTHER0_RETRY.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 46, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_OTHER0_RETRY.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 46, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_OTHER0_RETRY.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 46, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_OTHER0_RETRY.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxxx1", }, 
"CHA.RxC_OTHER0_RETRY.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 46, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_OTHER0_RETRY.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 46, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_OTHER1_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 47, "ExtSel": "", }, "CHA.RxC_OTHER1_RETRY.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_OTHER1_RETRY.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_OTHER1_RETRY.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 47, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_OTHER1_RETRY.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", 
"Desc": "Other Retries", "EvSel": 47, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_OTHER1_RETRY.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 47, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_OTHER1_RETRY.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 47, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_OTHER1_RETRY.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 47, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_OTHER1_RETRY.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)", "Desc": "Other Retries", "EvSel": 47, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_PRQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 32, "ExtSel": "", }, "CHA.RxC_PRQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_PRQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_PRQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 32, "ExtSel": 
"", "Umask": "bxxxx1xxx", }, "CHA.RxC_PRQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 32, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_PRQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 32, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_PRQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 32, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_PRQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 32, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_PRQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 32, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_PRQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 33, "ExtSel": "", }, "CHA.RxC_PRQ1_REJECT.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 33, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_PRQ1_REJECT.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_PRQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_PRQ1_REJECT.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 33, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_PRQ1_REJECT.SF_VICTIM": { "Box": "CHA", "Category": "CHA 
INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 33, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_PRQ1_REJECT.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 33, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_PRQ1_REJECT.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 33, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_PRQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Desc": "Ingress (from CMS) Request Queue Rejects", "EvSel": 33, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_REQ_Q0_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 42, "ExtSel": "", }, "CHA.RxC_REQ_Q0_RETRY.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 42, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_REQ_Q0_RETRY.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 42, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_REQ_Q0_RETRY.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 42, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_REQ_Q0_RETRY.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxx1xx", }, 
"CHA.RxC_REQ_Q0_RETRY.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 42, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_REQ_Q0_RETRY.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 42, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_REQ_Q0_RETRY.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_REQ_Q0_RETRY.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_REQ_Q1_RETRY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 43, "ExtSel": "", }, "CHA.RxC_REQ_Q1_RETRY.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 43, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_REQ_Q1_RETRY.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 43, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_REQ_Q1_RETRY.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue 
Retries", "EvSel": 43, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_REQ_Q1_RETRY.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 43, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 43, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_REQ_Q1_RETRY.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 43, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_REQ_Q1_RETRY.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 43, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_REQ_Q1_RETRY.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "\"REQUESTQ\" includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)", "Desc": "Request Queue Retries", "EvSel": 43, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_RRQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 38, "ExtSel": "", }, "CHA.RxC_RRQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_RRQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction 
flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_RRQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 38, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_RRQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 38, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_RRQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 38, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_RRQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 38, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_RRQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 38, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_RRQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 38, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_RRQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 39, "ExtSel": "", }, 
"CHA.RxC_RRQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 39, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_RRQ1_REJECT.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 39, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_RRQ1_REJECT.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 39, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_RRQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 39, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_RRQ1_REJECT.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 39, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_RRQ1_REJECT.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 39, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_RRQ1_REJECT.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 39, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_RRQ1_REJECT.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing 
through the RRQ (Remote Response Queue) had to retry.", "Desc": "RRQ Rejects", "EvSel": 39, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_WBQ0_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 40, "ExtSel": "", }, "CHA.RxC_WBQ0_REJECT.AK_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 40, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxC_WBQ0_REJECT.IV_NON_UPI": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 40, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_WBQ0_REJECT.BL_NCB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 40, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_WBQ0_REJECT.BL_NCS_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 40, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.RxC_WBQ0_REJECT.BL_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_WBQ0_REJECT.AD_RSP_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_WBQ0_REJECT.AD_REQ_VN0": { "Box": "CHA", "Category": "CHA 
INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 40, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_WBQ0_REJECT.BL_WB_VN0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 40, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_WBQ1_REJECT": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 41, "ExtSel": "", }, "CHA.RxC_WBQ1_REJECT.PA_MATCH": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 41, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.RxC_WBQ1_REJECT.LLC_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.RxC_WBQ1_REJECT.HA": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.RxC_WBQ1_REJECT.SF_VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.RxC_WBQ1_REJECT.LLC_OR_SF_WAY": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxx1xxxxx", }, 
"CHA.RxC_WBQ1_REJECT.ANY0": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.RxC_WBQ1_REJECT.VICTIM": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.RxC_WBQ1_REJECT.ALLOW_SNP": { "Box": "CHA", "Category": "CHA INGRESS_RETRY Events", "Defn": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.", "Desc": "WBQ Rejects", "EvSel": 41, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.RxR_BUSY_STARVED": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", }, "CHA.RxR_BUSY_STARVED.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. 
Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "CHA.RxR_BUSY_STARVED.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "CHA.RxR_BUSY_STARVED.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "CHA.RxR_BUSY_STARVED.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority", "Desc": "Transgress Injection Starvation", "EvSel": 180, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "CHA.RxR_BYPASS": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "CHA.RxR_BYPASS.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "CHA.RxR_BYPASS.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "CHA.RxR_BYPASS.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "CHA.RxR_BYPASS.IV_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "CHA.RxR_BYPASS.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "CHA.RxR_BYPASS.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of packets bypassing the CMS Ingress", "Desc": "Transgress Ingress Bypass", "EvSel": 178, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "CHA.RxR_CRD_STARVED": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", }, "CHA.RxR_CRD_STARVED.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "CHA.RxR_CRD_STARVED.IFV": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "b1xxxxxxx", }, "CHA.RxR_CRD_STARVED.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "CHA.RxR_CRD_STARVED.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "CHA.RxR_CRD_STARVED.IV_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "CHA.RxR_CRD_STARVED.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "CHA.RxR_CRD_STARVED.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. 
In this case, the Ingress is unable to forward to the Egress due to a lack of credit.", "Desc": "Transgress Injection Starvation", "EvSel": 179, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the OR of the two. For this purpose IFV is considered an AK ring type. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "CHA.RxR_INSERTS": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "CHA.RxR_INSERTS.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "CHA.RxR_INSERTS.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "CHA.RxR_INSERTS.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "CHA.RxR_INSERTS.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "CHA.RxR_INSERTS.IV_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "CHA.RxR_INSERTS.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Allocations", "EvSel": 177, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "CHA.RxR_OCCUPANCY": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", }, "CHA.RxR_OCCUPANCY.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxx1xxxx", }, "CHA.RxR_OCCUPANCY.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bx1xxxxxx", }, "CHA.RxR_OCCUPANCY.IV_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxxx1xxx", }, "CHA.RxR_OCCUPANCY.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxx1x", }, "CHA.RxR_OCCUPANCY.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. Selecting multiple ring types NOT supported", "Umask": "bxxxxxxx1", }, "CHA.RxR_OCCUPANCY.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Transgress INGRESS Events", "Defn": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh", "Desc": "Transgress Ingress Occupancy", "EvSel": 176, "ExtSel": "", "Notes": "If both masks are selected for one ring type (ex: AD CRD + BNC), will count the SUM of the two. 
Selecting multiple ring types NOT supported", "Umask": "bxxxxx1xx", }, "CHA.SF_EVICTION": { "Box": "CHA", "Category": "CHA CACHE Events", "Desc": "Snoop Filter Eviction", "EvSel": 61, "ExtSel": "", }, "CHA.SF_EVICTION.S_STATE": { "Box": "CHA", "Category": "CHA CACHE Events", "Desc": "Snoop Filter Eviction", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.SF_EVICTION.M_STATE": { "Box": "CHA", "Category": "CHA CACHE Events", "Desc": "Snoop Filter Eviction", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.SF_EVICTION.E_STATE": { "Box": "CHA", "Category": "CHA CACHE Events", "Desc": "Snoop Filter Eviction", "EvSel": 61, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.SNOOPS_SENT": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", }, "CHA.SNOOPS_SENT.LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.SNOOPS_SENT.BCST_REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.SNOOPS_SENT.REMOTE": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.SNOOPS_SENT.ALL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.SNOOPS_SENT.DIRECT_LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.SNOOPS_SENT.DIRECT_REMOTE": { "Box": "CHA", "Category": "CHA HA 
REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.SNOOPS_SENT.BCST_LOCAL": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Counts the number of snoops issued by the HA.", "Desc": "Snoops Sent", "EvSel": 81, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.SNOOP_RESP": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", }, "CHA.SNOOP_RESP.RSPI": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.SNOOP_RESP.RSP_WBWB": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.SNOOP_RESP.RSPIFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.SNOOP_RESP.RSPFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.SNOOP_RESP.RSP_FWD_WB": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.SNOOP_RESP.RSPS": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.SNOOP_RESP.RSPSFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.SNOOP_RESP.RSPCNFLCTS": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. 
For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.", "Desc": "Snoop Responses Received", "EvSel": 92, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.SNOOP_RESP_LOCAL": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", }, "CHA.SNOOP_RESP_LOCAL.RSPSFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.SNOOP_RESP_LOCAL.RSPIFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.SNOOP_RESP_LOCAL.RSPFWD": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "b1xxxxxxx", }, "CHA.SNOOP_RESP_LOCAL.RSP_WB": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.SNOOP_RESP_LOCAL.RSP_FWD_WB": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.SNOOP_RESP_LOCAL.RSPI": { "Box": "CHA", "Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.SNOOP_RESP_LOCAL.RSPS": { "Box": "CHA", 
"Category": "CHA HA SNOOP RESPONSE Events", "Defn": "Number of snoop responses received for a Local request", "Desc": "Snoop Responses Received Local", "EvSel": 93, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number 
of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent0 Transgress Credits", "EvSel": 208, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", 
"Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No AD Agent1 Transgress Credits", "EvSel": 210, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress 
Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent0 Transgress Credits", "EvSel": 212, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4": { "Box": "CHA", "Category": "CHA 
CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1": { "Box": "CHA", "Category": "CHA CMS Transgress Credit Events", "Defn": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.", "Desc": "Stall on No BL Agent1 Transgress Credits", "EvSel": 214, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TOR_INSERTS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. 
The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", }, "CHA.TOR_INSERTS.EVICT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxx1x", }, "CHA.TOR_INSERTS.IO_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00100100", }, "CHA.TOR_INSERTS.ALL_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00100101", }, "CHA.TOR_INSERTS.IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00110001", }, "CHA.TOR_INSERTS.IPQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxx1xxx", }, "CHA.TOR_INSERTS.HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxx1xxxx", }, "CHA.TOR_INSERTS.IA_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00010001", }, "CHA.TOR_INSERTS.IO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00110100", }, "CHA.TOR_INSERTS.IA_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00100001", }, "CHA.TOR_INSERTS.IO_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00010100", }, "CHA.TOR_INSERTS.MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxx1xxxxx", }, "CHA.TOR_INSERTS.ALL_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00010101", }, "CHA.TOR_INSERTS.IRQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxxx1", }, "CHA.TOR_INSERTS.ALL_IO_IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00110101", }, "CHA.TOR_INSERTS.PRQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": "0-3", "Defn": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent.", "Desc": "TOR Inserts", "EvSel": 53, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxx1xx", }, "CHA.TOR_OCCUPANCY": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). 
Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", }, "CHA.TOR_OCCUPANCY.IO_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00100100", }, "CHA.TOR_OCCUPANCY.EVICT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxxx1x", }, "CHA.TOR_OCCUPANCY.ALL_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00100111", }, "CHA.TOR_OCCUPANCY.HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxx1xxxx", }, "CHA.TOR_OCCUPANCY.IA_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00010001", }, "CHA.TOR_OCCUPANCY.IA": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00110001", }, "CHA.TOR_OCCUPANCY.IPQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxx1xxx", }, "CHA.TOR_OCCUPANCY.IO_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. 
Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00010100", }, "CHA.TOR_OCCUPANCY.MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxx1xxxxx", }, "CHA.TOR_OCCUPANCY.ALL_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00010111", }, "CHA.TOR_OCCUPANCY.IO": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. 
The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00110100", }, "CHA.TOR_OCCUPANCY.IA_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "b00100001", }, "CHA.TOR_OCCUPANCY.ALL": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "b00110111", }, "CHA.TOR_OCCUPANCY.PRQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. those that came from a remote node, etc.", "Umask": "bxxxxx1xx", }, "CHA.TOR_OCCUPANCY.IRQ": { "Box": "CHA", "Category": "CHA TOR Events", "Counters": 0, "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T", "Desc": "TOR Occupancy", "EvSel": 54, "ExtSel": "", "Notes": "HW does not strictly OR each subevent. The equation is roughly (IRQ|EVICT|PRQ|IPQ|RRQ|WBQ) & (HIT|MISS). Meaing it is necessary to set one of the queue bits before one can measure .HIT or .MISS. Also note this event is subect to CHA Filter1 which allows a user to opcode match against TOR entries, distinguish those requests satsified locally vs. 
those that came from a remote node, etc.", "Umask": "bxxxxxxx1", }, "CHA.TxR_HORZ_ADS_USED": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", }, "CHA.TxR_HORZ_ADS_USED.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_HORZ_ADS_USED.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_HORZ_ADS_USED.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_HORZ_ADS_USED.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_HORZ_ADS_USED.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal ADS Used", "EvSel": 157, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_HORZ_BYPASS": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": 
"CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", }, "CHA.TxR_HORZ_BYPASS.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_HORZ_BYPASS.IV_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_HORZ_BYPASS.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_HORZ_BYPASS.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_HORZ_BYPASS.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_HORZ_BYPASS.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Horizontal Bypass Used", "EvSel": 159, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_HORZ_CYCLES_FULL": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", }, "CHA.TxR_HORZ_CYCLES_FULL.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_HORZ_CYCLES_FULL.IV_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_HORZ_CYCLES_FULL.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_HORZ_CYCLES_FULL.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_HORZ_CYCLES_FULL.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_HORZ_CYCLES_FULL.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Full", "EvSel": 150, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_HORZ_CYCLES_NE": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", }, "CHA.TxR_HORZ_CYCLES_NE.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_HORZ_CYCLES_NE.IV_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_HORZ_CYCLES_NE.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_HORZ_CYCLES_NE.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_HORZ_CYCLES_NE.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_HORZ_CYCLES_NE.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. 
The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "Cycles CMS Horizontal Egress Queue is Not Empty", "EvSel": 151, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_HORZ_INSERTS": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", }, "CHA.TxR_HORZ_INSERTS.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_HORZ_INSERTS.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_HORZ_INSERTS.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_HORZ_INSERTS.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxx1xxxx", }, 
"CHA.TxR_HORZ_INSERTS.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_HORZ_INSERTS.IV_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Inserts", "EvSel": 149, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_HORZ_NACK": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", }, "CHA.TxR_HORZ_NACK.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_HORZ_NACK.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_HORZ_NACK.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_HORZ_NACK.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bx1xxxxxx", }, 
"CHA.TxR_HORZ_NACK.IV_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_HORZ_NACK.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Horizontal Ring", "Desc": "CMS Horizontal Egress NACKs", "EvSel": 153, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_HORZ_OCCUPANCY": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", }, "CHA.TxR_HORZ_OCCUPANCY.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_HORZ_OCCUPANCY.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_HORZ_OCCUPANCY.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_HORZ_OCCUPANCY.IV_BNC": { "Box": "CHA", "Category": "CHA CMS 
Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_HORZ_OCCUPANCY.BL_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_HORZ_OCCUPANCY.AD_CRD": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.", "Desc": "CMS Horizontal Egress Occupancy", "EvSel": 148, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_HORZ_STARVED": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", }, "CHA.TxR_HORZ_STARVED.BL_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_HORZ_STARVED.AK_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_HORZ_STARVED.AD_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_HORZ_STARVED.IV_BNC": { "Box": "CHA", "Category": "CHA CMS Horizontal EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.", "Desc": "CMS Horizontal Egress Injection Starvation", "EvSel": 155, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_ADS_USED": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", }, "CHA.TxR_VERT_ADS_USED.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_ADS_USED.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_ADS_USED.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock 
Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_ADS_USED.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_ADS_USED.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_ADS_USED.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 156, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_BYPASS": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", }, "CHA.TxR_VERT_BYPASS.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_BYPASS.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_BYPASS.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", 
"Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_BYPASS.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_BYPASS.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_BYPASS.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_BYPASS.IV": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.", "Desc": "CMS Vertical ADS Used", "EvSel": 158, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_CYCLES_FULL": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", }, "CHA.TxR_VERT_CYCLES_FULL.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_CYCLES_FULL.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_CYCLES_FULL.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_CYCLES_FULL.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_CYCLES_FULL.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_CYCLES_FULL.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_CYCLES_FULL.IV": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Full", "EvSel": 146, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_CYCLES_NE": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", }, "CHA.TxR_VERT_CYCLES_NE.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_CYCLES_NE.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_CYCLES_NE.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_CYCLES_NE.IV": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_CYCLES_NE.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_CYCLES_NE.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_CYCLES_NE.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "Cycles CMS Vertical Egress Queue Is Not Empty", "EvSel": 147, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_INSERTS": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", }, "CHA.TxR_VERT_INSERTS.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_INSERTS.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_INSERTS.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_INSERTS.IV": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_INSERTS.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_INSERTS.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. 
The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_INSERTS.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Allocations", "EvSel": 145, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_NACK": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", }, "CHA.TxR_VERT_NACK.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_NACK.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_NACK.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_NACK.IV": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_NACK.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": 
"bxxxxxx1x", }, "CHA.TxR_VERT_NACK.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_NACK.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts number of Egress packets NACK'ed on to the Vertical Ring", "Desc": "CMS Vertical Egress NACKs", "EvSel": 152, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_OCCUPANCY": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", }, "CHA.TxR_VERT_OCCUPANCY.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_OCCUPANCY.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_OCCUPANCY.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_OCCUPANCY.IV": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress 
buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_OCCUPANCY.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_OCCUPANCY.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_OCCUPANCY.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.", "Desc": "CMS Vert Egress Occupancy", "EvSel": 144, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.TxR_VERT_STARVED": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", }, "CHA.TxR_VERT_STARVED.AK_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.TxR_VERT_STARVED.AD_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.TxR_VERT_STARVED.AK_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.TxR_VERT_STARVED.IV": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.TxR_VERT_STARVED.BL_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.TxR_VERT_STARVED.AD_AG0": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. 
This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.TxR_VERT_STARVED.BL_AG1": { "Box": "CHA", "Category": "CHA CMS Vertical EGRESS Events", "Defn": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.", "Desc": "CMS Vertical Egress Injection Starvation", "EvSel": 154, "ExtSel": "", "Umask": "bx1xxxxxx", }, "CHA.VERT_RING_AD_IN_USE": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", }, "CHA.VERT_RING_AD_IN_USE.UP_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. 
On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.VERT_RING_AD_IN_USE.DN_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.VERT_RING_AD_IN_USE.UP_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. 
On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.VERT_RING_AD_IN_USE.DN_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AD Ring In Use", "EvSel": 166, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.VERT_RING_AK_IN_USE": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. 
The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", }, "CHA.VERT_RING_AK_IN_USE.UP_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.VERT_RING_AK_IN_USE.DN_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.VERT_RING_AK_IN_USE.UP_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.VERT_RING_AK_IN_USE.DN_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical AK Ring In Use", "EvSel": 168, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.VERT_RING_BL_IN_USE": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", }, "CHA.VERT_RING_BL_IN_USE.UP_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.VERT_RING_BL_IN_USE.DN_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.VERT_RING_BL_IN_USE.UP_ODD": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. 
In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.VERT_RING_BL_IN_USE.DN_EVEN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.", "Desc": "Vertical BL Ring in Use", "EvSel": 170, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.VERT_RING_IV_IN_USE": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 172, "ExtSel": "", }, "CHA.VERT_RING_IV_IN_USE.DN": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. 
This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.VERT_RING_IV_IN_USE.UP": { "Box": "CHA", "Category": "CHA Vertical In Use RING Events", "Defn": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DN_EVEN. To monitor the \"Odd\" ring, they should select both UP_ODD and DN_ODD.", "Desc": "Vertical IV Ring in Use", "EvSel": 172, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.WB_PUSH_MTOI": { "Box": "CHA", "Category": "CHA HA WBPUSHMTOI Events", "Defn": "Counts the number of times when the CHA was received WbPushMtoI", "Desc": "WbPushMtoI", "EvSel": 86, "ExtSel": "", }, "CHA.WB_PUSH_MTOI.LLC": { "Box": "CHA", "Category": "CHA HA WBPUSHMTOI Events", "Defn": "Counts the number of times when the CHA was received WbPushMtoI", "Desc": "WbPushMtoI", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.WB_PUSH_MTOI.MEM": { "Box": "CHA", "Category": "CHA HA WBPUSHMTOI Events", "Defn": "Counts the number of times when the CHA was received WbPushMtoI", "Desc": "WbPushMtoI", "EvSel": 86, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.WRITE_NO_CREDITS": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. 
In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", }, "CHA.WRITE_NO_CREDITS.MC1_SMI1": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxx1x", }, "CHA.WRITE_NO_CREDITS.EDC2_SMI4": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxx1xxxx", }, "CHA.WRITE_NO_CREDITS.EDC0_SMI2": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxx1xx", }, "CHA.WRITE_NO_CREDITS.MC0_SMI0": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. 
In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxxxxx1", }, "CHA.WRITE_NO_CREDITS.EDC1_SMI3": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxxxx1xxx", }, "CHA.WRITE_NO_CREDITS.EDC3_SMI5": { "Box": "CHA", "Category": "CHA HA READ WRITE Events", "Defn": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.", "Desc": "CHA iMC CHNx WRITE Credits Empty", "EvSel": 90, "ExtSel": "", "Umask": "bxx1xxxxx", }, "CHA.XSNP_RESP": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", }, "CHA.XSNP_RESP.EXT_RSPI_FWDM": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. 
This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b00110000", }, "CHA.XSNP_RESP.ANY_RSP_HITFSE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b11100001", }, "CHA.XSNP_RESP.CORE_RSPS_FWDFE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b01000010", }, "CHA.XSNP_RESP.EVICT_RSPS_FWDFE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. 
This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b10000010", }, "CHA.XSNP_RESP.CORE_RSPI_FWDFE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b01000100", }, "CHA.XSNP_RESP.EVICT_RSP_HITFSE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b10000001", }, "CHA.XSNP_RESP.CORE_RSPI_FWDM": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. 
This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b01010000", }, "CHA.XSNP_RESP.EXT_RSPS_FWDFE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b00100010", }, "CHA.XSNP_RESP.EXT_RSPS_FWDM": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b00101000", }, "CHA.XSNP_RESP.EVICT_RSPI_FWDM": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. 
This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b10010000", }, "CHA.XSNP_RESP.CORE_RSP_HITFSE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b01000001", }, "CHA.XSNP_RESP.EXT_RSP_HITFSE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b00100001", }, "CHA.XSNP_RESP.ANY_RSPS_FWDFE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. 
This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b11100010", }, "CHA.XSNP_RESP.EVICT_RSPS_FWDM": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b10001000", }, "CHA.XSNP_RESP.EXT_RSPI_FWDFE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b00100100", }, "CHA.XSNP_RESP.ANY_RSPI_FWDFE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. 
This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b11100100", }, "CHA.XSNP_RESP.ANY_RSPS_FWDM": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b11101000", }, "CHA.XSNP_RESP.EVICT_RSPI_FWDFE": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b10000100", }, "CHA.XSNP_RESP.CORE_RSPS_FWDM": { "Box": "CHA", "Category": "CHA ISMQ Events", "Counters": "0-3", "Defn": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. 
This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.", "Desc": "Core Cross Snoop Responses", "EvSel": 50, "ExtSel": "", "Umask": "b01001000", }, # UBOX: "UBOX.EVENT_MSG": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", }, "UBOX.EVENT_MSG.VLW_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.EVENT_MSG.MSI_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UBOX.EVENT_MSG.DOORBELL_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UBOX.EVENT_MSG.IPI_RCVD": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UBOX.EVENT_MSG.INT_PRIO": { "Box": "UBOX", "Category": "UBOX EVENT_MSG Events", "Counters": "0-1", "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.", "Desc": "Message Received", "EvSel": 66, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UBOX.LOCK_CYCLES": { "Box": "UBOX", "Category": "UBOX LOCK Events", "Counters": "0-1", "Defn": 
"Number of times an IDI Lock/SplitLock sequence was started", "Desc": "IDI Lock/SplitLock Cycles", "EvSel": 68, "ExtSel": "", }, "UBOX.PHOLD_CYCLES": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", }, "UBOX.PHOLD_CYCLES.ASSERT_TO_ACK": { "Box": "UBOX", "Category": "UBOX PHOLD Events", "Counters": "0-1", "Defn": "PHOLD cycles.", "Desc": "Cycles PHOLD Assert to Ack", "EvSel": 69, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.RACU_DRNG": { "Box": "UBOX", "Category": "UBOX RACU Events", "EvSel": 76, "ExtSel": "", }, "UBOX.RACU_DRNG.RDSEED": { "Box": "UBOX", "Category": "UBOX RACU Events", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UBOX.RACU_DRNG.PFTCH_BUF_EMPTY": { "Box": "UBOX", "Category": "UBOX RACU Events", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UBOX.RACU_DRNG.RDRAND": { "Box": "UBOX", "Category": "UBOX RACU Events", "EvSel": 76, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UBOX.RACU_REQUESTS": { "Box": "UBOX", "Category": "UBOX RACU Events", "Counters": "0-1", "Defn": "Number outstanding register requests within message channel tracker", "Desc": "RACU Request", "EvSel": 70, "ExtSel": "", }, # UPI_LL: "UPI_LL.CLOCKTICKS": { "Box": "UPI_LL", "Category": "UPI_LL CFCLK Events", "Counters": "0-3", "Defn": "Counts the number of clocks in the UPI LL. This clock runs at 1/8th the \"GT/s\" speed of the UPI link. For example, a 8GT/s link will have qfclk or 1GHz. Current products do not support dynamic link speeds, so this frequency is fixed.", "Desc": "Number of kfclks", "EvSel": 1, "ExtSel": "", }, "UPI_LL.DIRECT_ATTEMPTS": { "Box": "UPI_LL", "Category": "UPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of Data Response(DRS) packets UPI attempted to send directly to the core or to a different UPI link. 
Note: This only counts attempts on valid candidates such as DRS packets destined for CHAs.", "Desc": "Direct packet attempts", "EvSel": 18, "ExtSel": "", }, "UPI_LL.DIRECT_ATTEMPTS.D2U": { "Box": "UPI_LL", "Category": "UPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of Data Response(DRS) packets UPI attempted to send directly to the core or to a different UPI link. Note: This only counts attempts on valid candidates such as DRS packets destined for CHAs.", "Desc": "Direct packet attempts", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.DIRECT_ATTEMPTS.D2C": { "Box": "UPI_LL", "Category": "UPI_LL DIRECT2CORE Events", "Counters": "0-3", "Defn": "Counts the number of Data Response(DRS) packets UPI attempted to send directly to the core or to a different UPI link. Note: This only counts attempts on valid candidates such as DRS packets destined for CHAs.", "Desc": "Direct packet attempts", "EvSel": 18, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.FLOWQ_NO_VNA_CRD": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AD_VNA_EQ2": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AK_VNA_EQ3": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "b1xxxxxxx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.BL_VNA_EQ0": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AK_VNA_EQ2": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bx1xxxxxx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AD_VNA_EQ1": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AK_VNA_EQ1": { "Box": 
"UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxx1xxxxx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AK_VNA_EQ0": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UPI_LL.FLOWQ_NO_VNA_CRD.AD_VNA_EQ0": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 24, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.L1_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER Events", "Counters": "0-3", "Defn": "Number of UPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a UPI link. Use edge detect to count the number of instances when the UPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.", "Desc": "Cycles in L1", "EvSel": 33, "ExtSel": "", }, "UPI_LL.M3_BYP_BLOCKED": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", }, "UPI_LL.M3_BYP_BLOCKED.BGF_CRD": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.M3_BYP_BLOCKED.GV_BLOCK": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UPI_LL.M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 20, "ExtSel": "", "Umask": "bxxxxxx1x", }, 
"UPI_LL.M3_CRD_RETURN_BLOCKED": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 22, "ExtSel": "", }, "UPI_LL.M3_RXQ_BLOCKED": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", }, "UPI_LL.M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UPI_LL.M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.M3_RXQ_BLOCKED.BGF_CRD": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxx1xxxxx", }, "UPI_LL.M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.M3_RXQ_BLOCKED.GV_BLOCK": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bx1xxxxxx", }, "UPI_LL.M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2": { "Box": "UPI_LL", "Category": "UPI_LL LL to M3 Events", "Counters": "0-3", "EvSel": 21, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.PHY_INIT_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER Events", "Counters": "0-3", "Desc": "Cycles where phy is not in L0, L0c, L0p, L1", "EvSel": 32, "ExtSel": "", }, "UPI_LL.POWER_L1_NACK": { "Box": "UPI_LL", "Category": "UPI_LL POWER Events", "Counters": "0-3", "Defn": "Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. 
If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).", "Desc": "L1 Req Nack", "EvSel": 35, "ExtSel": "", "Notes": "L1 only", }, "UPI_LL.POWER_L1_REQ": { "Box": "UPI_LL", "Category": "UPI_LL POWER Events", "Counters": "0-3", "Defn": "Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). 
A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).", "Desc": "L1 Req (same as L1 Ack).", "EvSel": 34, "ExtSel": "", "Notes": "L1 only", }, "UPI_LL.REQ_SLOT2_FROM_M3": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 70, "ExtSel": "", }, "UPI_LL.REQ_SLOT2_FROM_M3.VN0": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.REQ_SLOT2_FROM_M3.VNA": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.REQ_SLOT2_FROM_M3.ACK": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 70, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.REQ_SLOT2_FROM_M3.VN1": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 70, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.RxL0P_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles link in L0p. 
Receive side.", "EvSel": 37, "ExtSel": "", "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "UPI_LL.RxL0_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_RX Events", "Counters": "0-3", "Defn": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0. Receive side.", "EvSel": 36, "ExtSel": "", }, "UPI_LL.RxL_BASIC_HDR_MATCH": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSP_NODATA": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bxxxx1010", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.NCS": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1111", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSP_DATA_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1100", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.WB_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bxxxx1101", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.REQ_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1000", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSPCNFLT": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "b10101010", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.NCB_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bxxxx1110", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1010", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.NCB": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1110", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.SNP": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bxxxx1001", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.REQ": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1000", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.NCS_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1111", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.SNP_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "b1001", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSPI": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "b00101010", "UmaskExt": 0x1, }, "UPI_LL.RxL_BASIC_HDR_MATCH.WB": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1101", "UmaskExt": 0x0, }, "UPI_LL.RxL_BASIC_HDR_MATCH.RSP_DATA": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Receive path of a UPI Port", "EvSel": 5, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1100", "UmaskExt": 0x0, }, "UPI_LL.RxL_BYPASSED": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly and into the Egress. 
This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "RxQ Flit Buffer Bypassed", "EvSel": 49, "ExtSel": "", }, "UPI_LL.RxL_BYPASSED.SLOT1": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "RxQ Flit Buffer Bypassed", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.RxL_BYPASSED.SLOT0": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "RxQ Flit Buffer Bypassed", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.RxL_BYPASSED.SLOT2": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly and into the Egress. This is a latency optimization, and should generally be the common case. 
If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.", "Desc": "RxQ Flit Buffer Bypassed", "EvSel": 49, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.RxL_CREDITS_CONSUMED_VN0": { "Box": "UPI_LL", "Category": "UPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN0 Credit Consumed", "EvSel": 57, "ExtSel": "", }, "UPI_LL.RxL_CREDITS_CONSUMED_VN1": { "Box": "UPI_LL", "Category": "UPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VN1 Credit Consumed", "EvSel": 58, "ExtSel": "", }, "UPI_LL.RxL_CREDITS_CONSUMED_VNA": { "Box": "UPI_LL", "Category": "UPI_LL RX_CREDITS_CONSUMED Events", "Counters": "0-3", "Defn": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.", "Desc": "VNA Credit Consumed", "EvSel": 56, "ExtSel": "", }, "UPI_LL.RxL_FLITS": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. 
Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", }, "UPI_LL.RxL_FLITS.NULL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxx1xxxxx", }, "UPI_LL.RxL_FLITS.SLOT0": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxxxxxxx1", }, "UPI_LL.RxL_FLITS.ALL_NULL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. 
Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "b00100111", }, "UPI_LL.RxL_FLITS.NON_DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "b10010111", }, "UPI_LL.RxL_FLITS.IDLE": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "b01000111", }, "UPI_LL.RxL_FLITS.LLCTRL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. 
Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bx1xxxxxx", }, "UPI_LL.RxL_FLITS.ALL_DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "b00001111", }, "UPI_LL.RxL_FLITS.SLOT1": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxxxxxx1x", }, "UPI_LL.RxL_FLITS.PROTHDR": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. 
Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "b1xxxxxxx", }, "UPI_LL.RxL_FLITS.DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxxxx1xxx", }, "UPI_LL.RxL_FLITS.LLCRD": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxxx1xxxx", }, "UPI_LL.RxL_FLITS.SLOT2": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Received", "EvSel": 3, "ExtSel": "", "Notes": "When Umask is set to all 1's then all Flits should be counted as 3 since a full flit is counted for each valid slot. By counting all legal flit time we exclude impact of L0p, L0c, and the 5/6 ratio in L0. Slot 0 Dual is counted in slot 0 and slot 1 (as a protocol header)", "Umask": "bxxxxx1xx", }, "UPI_LL.RxL_INSERTS": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. 
If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "RxQ Flit Buffer Allocations", "EvSel": 48, "ExtSel": "", }, "UPI_LL.RxL_INSERTS.SLOT2": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "RxQ Flit Buffer Allocations", "EvSel": 48, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.RxL_INSERTS.SLOT0": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "RxQ Flit Buffer Allocations", "EvSel": 48, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.RxL_INSERTS.SLOT1": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. 
This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "RxQ Flit Buffer Allocations", "EvSel": 48, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.RxL_OCCUPANCY": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 50, "ExtSel": "", }, "UPI_LL.RxL_OCCUPANCY.SLOT1": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.RxL_OCCUPANCY.SLOT0": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. 
This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.RxL_OCCUPANCY.SLOT2": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.", "Desc": "RxQ Occupancy - All Packets", "EvSel": 50, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.RxL_SLOT_BYPASS": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", }, "UPI_LL.RxL_SLOT_BYPASS.S0_RXQ2": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.RxL_SLOT_BYPASS.S0_RXQ1": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.RxL_SLOT_BYPASS.S1_RXQ0": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.RxL_SLOT_BYPASS.S2_RXQ1": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxx1xxxxx", }, "UPI_LL.RxL_SLOT_BYPASS.S1_RXQ2": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.RxL_SLOT_BYPASS.S2_RXQ0": { "Box": "UPI_LL", "Category": "UPI_LL RXQ Events", "Counters": "0-3", "EvSel": 51, "ExtSel": 
"", "Umask": "bxxx1xxxx", }, "UPI_LL.TxL0P_CLK_ACTIVE": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", }, "UPI_LL.TxL0P_CLK_ACTIVE.RXQ_CRED": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxxxx1xxx", }, "UPI_LL.TxL0P_CLK_ACTIVE.RXQ": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxx1x", }, "UPI_LL.TxL0P_CLK_ACTIVE.RETRY": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxx1xxxxx", }, "UPI_LL.TxL0P_CLK_ACTIVE.RXQ_BYPASS": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxx1xx", }, "UPI_LL.TxL0P_CLK_ACTIVE.DFX": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bx1xxxxxx", }, "UPI_LL.TxL0P_CLK_ACTIVE.CFG_CTL": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxxxxxxx1", }, "UPI_LL.TxL0P_CLK_ACTIVE.SPARE": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "b1xxxxxxx", }, "UPI_LL.TxL0P_CLK_ACTIVE.TXQ": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 42, "ExtSel": "", "Umask": "bxxx1xxxx", }, "UPI_LL.TxL0P_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. 
Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.", "Desc": "Cycles in L0p. Transmit side.", "EvSel": 39, "ExtSel": "", "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.", }, "UPI_LL.TxL0P_POWER_CYCLES_LL_ENTER": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 40, "ExtSel": "", }, "UPI_LL.TxL0P_POWER_CYCLES_M3_EXIT": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "EvSel": 41, "ExtSel": "", }, "UPI_LL.TxL0_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_TX Events", "Counters": "0-3", "Defn": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.", "Desc": "Cycles in L0. Transmit side.", "EvSel": 38, "ExtSel": "", "Notes": "Includes L0p cycles. To get just L0, subtract TxL0P_POWER_CYCLES", }, "UPI_LL.TxL_BASIC_HDR_MATCH": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSP_DATA": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1100", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.WB": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1101", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSPI": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "b00101010", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.SNP_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. 
See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1001", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.NCS_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1111", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.REQ": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1000", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.NCB": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bxxxx1110", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.SNP": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1001", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1010", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.NCB_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bxxxx1110", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSPCNFLT": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "b10101010", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.WB_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1101", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.REQ_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. 
Then slots are ORed.", "Umask": "bXXXX1000", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSP_DATA_OPC": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bXXXX1100", "UmaskExt": 0x1, }, "UPI_LL.TxL_BASIC_HDR_MATCH.NCS": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1111", "UmaskExt": 0x0, }, "UPI_LL.TxL_BASIC_HDR_MATCH.RSP_NODATA": { "Box": "UPI_LL", "Category": "UPI_LL FLIT match Events", "Counters": "0-3", "Desc": "Matches on Transmit path of a UPI Port", "EvSel": 4, "Filter": "CtrCtrl[55:32]", "ExtSel": "", "Notes": "This event is subject to finer grain filtering. See doc for more information. Filters available in the counter control's umask extention field b[55:32] - message class, opcode, local, remote, datahdr, ndatahdr, dual slot header, single slot header and pe) ANDed per Slot. Then slots are ORed.", "Umask": "bxxxx1010", "UmaskExt": 0x0, }, "UPI_LL.TxL_BYPASSED": { "Box": "UPI_LL", "Category": "UPI_LL TXQ Events", "Counters": "0-3", "Defn": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the UPI Link. 
Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.", "Desc": "Tx Flit Buffer Bypassed", "EvSel": 65, "ExtSel": "", }, "UPI_LL.TxL_FLITS": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", }, "UPI_LL.TxL_FLITS.SLOT2": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxxxxx1xx", }, "UPI_LL.TxL_FLITS.DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxxxx1xxx", }, "UPI_LL.TxL_FLITS.LLCRD": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxxx1xxxx", }, "UPI_LL.TxL_FLITS.SLOT1": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits 
Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxxxxxx1x", }, "UPI_LL.TxL_FLITS.PROTHDR": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "b1xxxxxxx", }, "UPI_LL.TxL_FLITS.IDLE": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "b01000111", }, "UPI_LL.TxL_FLITS.ALL_DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "b00001111", }, "UPI_LL.TxL_FLITS.LLCTRL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bx1xxxxxx", }, "UPI_LL.TxL_FLITS.SLOT0": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", 
"Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxxxxxxx1", }, "UPI_LL.TxL_FLITS.ALL_NULL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "b00100111", }, "UPI_LL.TxL_FLITS.NON_DATA": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "b10010111", }, "UPI_LL.TxL_FLITS.NULL": { "Box": "UPI_LL", "Category": "UPI_LL Flit Events", "Counters": "0-3", "Defn": "Shows legal flit time (hides impact of L0p and L0c).", "Desc": "Valid Flits Sent", "EvSel": 2, "ExtSel": "", "Notes": "You can OR any of 5 MSB together and apply against any combination of slots and they will be added together, but a slot MUST be selected.", "Umask": "bxx1xxxxx", }, "UPI_LL.TxL_INSERTS": { "Box": "UPI_LL", "Category": "UPI_LL TXQ Events", "Counters": "0-3", "Defn": "Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. 
This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.", "Desc": "Tx Flit Buffer Allocations", "EvSel": 64, "ExtSel": "", }, "UPI_LL.TxL_OCCUPANCY": { "Box": "UPI_LL", "Category": "UPI_LL TXQ Events", "Counters": "0-3", "Defn": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.", "Desc": "Tx Flit Buffer Occupancy", "EvSel": 66, "ExtSel": "", }, "UPI_LL.VNA_CREDIT_RETURN_BLOCKED_VN01": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "EvSel": 69, "ExtSel": "", }, "UPI_LL.VNA_CREDIT_RETURN_OCCUPANCY": { "Box": "UPI_LL", "Category": "UPI_LL VNA_CREDIT_RETURN Events", "Counters": "0-3", "Defn": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.", "Desc": "VNA Credits Pending Return - Occupancy", "EvSel": 68, "ExtSel": "", }, # iMC: "iMC.ACT_COUNT": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", }, "iMC.ACT_COUNT.BYP": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. 
One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.ACT_COUNT.RD": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.ACT_COUNT.WR": { "Box": "iMC", "Category": "iMC ACT Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.", "Desc": "DRAM Activate Count", "EvSel": 1, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.BYP_CMDS": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", }, "iMC.BYP_CMDS.PRE": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.BYP_CMDS.ACT": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.BYP_CMDS.CAS": { "Box": "iMC", "Category": "iMC BYPASS Command Events", "Counters": "0-3", "EvSel": 161, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.CAS_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", }, "iMC.CAS_COUNT.WR_WMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", 
"Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.CAS_COUNT.RD_RMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.CAS_COUNT.ALL": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001111", }, "iMC.CAS_COUNT.RD_REG": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.CAS_COUNT.WR_RMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.CAS_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00000011", }, "iMC.CAS_COUNT.WR_ISOCH": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.CAS_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", "Umask": "b00001100", }, "iMC.CAS_COUNT.RD_ISOCH": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.CAS_COUNT.RD_WMM": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", "EvSel": 4, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.CAS_COUNT.RD_UNDERFILL": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Desc": "DRAM CAS (Column Address Strobe) Commands.", 
"EvSel": 4, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.CLOCKTICKS": { "Box": "iMC", "Category": "iMC DCLK Events", "Counters": "0-3", "Desc": "DRAM Clockticks", "EvSel": 0, "ExtSel": "", }, "iMC.DRAM_PRE_ALL": { "Box": "iMC", "Category": "iMC DRAM_PRE_ALL Events", "Counters": "0-3", "Defn": "Counts the number of times that the precharge all command was sent.", "Desc": "DRAM Precharge All Commands", "EvSel": 6, "ExtSel": "", }, "iMC.DRAM_REFRESH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", }, "iMC.DRAM_REFRESH.HIGH": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.DRAM_REFRESH.PANIC": { "Box": "iMC", "Category": "iMC DRAM_REFRESH Events", "Counters": "0-3", "Defn": "Counts the number of refreshes issued.", "Desc": "Number of DRAM Refreshes Issued", "EvSel": 5, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.ECC_CORRECTABLE_ERRORS": { "Box": "iMC", "Category": "iMC ECC Events", "Counters": "0-3", "Defn": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit erros in lockstep mode.", "Desc": "ECC Correctable Errors", "EvSel": 9, "ExtSel": "", }, "iMC.MAJOR_MODES": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. 
Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", }, "iMC.MAJOR_MODES.WRITE": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.MAJOR_MODES.PARTIAL": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.MAJOR_MODES.READ": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.MAJOR_MODES.ISOCH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. 
Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.", "Desc": "Cycles in a Major Mode", "EvSel": 7, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.POWER_CHANNEL_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.", "Desc": "Channel DLLOFF Cycles", "EvSel": 132, "ExtSel": "", "Notes": "IBT = Input Buffer Termination = Off", }, "iMC.POWER_CHANNEL_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.", "Desc": "Channel PPD Cycles", "EvSel": 133, "ExtSel": "", "Notes": "IBT = Input Buffer Termination = On", }, "iMC.POWER_CKE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", }, "iMC.POWER_CKE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. 
If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "Umask": "b00001000", }, "iMC.POWER_CKE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "Umask": "b00000100", }, "iMC.POWER_CKE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). 
This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "Umask": "b00100000", }, "iMC.POWER_CKE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "Umask": "b00000001", }, "iMC.POWER_CKE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "Umask": "b00000010", }, "iMC.POWER_CKE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "Umask": "b00010000", }, "iMC.POWER_CKE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. 
Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "Umask": "b10000000", }, "iMC.POWER_CKE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).", "Desc": "CKE_ON_CYCLES by Rank", "EvSel": 131, "ExtSel": "", "Umask": "b01000000", }, "iMC.POWER_CRITICAL_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.", "Desc": "Critical Throttle Cycles", "EvSel": 134, "ExtSel": "", }, "iMC.POWER_PCU_THROTTLING": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "EvSel": 66, "ExtSel": "", }, "iMC.POWER_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. 
Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.", "Desc": "Clock-Enabled Self-Refresh", "EvSel": 67, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", }, "iMC.POWER_THROTTLE_CYCLES.RANK6": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bx1xxxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK7": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "b1xxxxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK4": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. 
This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK1": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.POWER_THROTTLE_CYCLES.RANK0": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.POWER_THROTTLE_CYCLES.RANK5": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxx1xxxxx", }, "iMC.POWER_THROTTLE_CYCLES.RANK2": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. 
It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.POWER_THROTTLE_CYCLES.RANK3": { "Box": "iMC", "Category": "iMC POWER Events", "Counters": "0-3", "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.", "Desc": "Throttle Cycles for Rank 0", "EvSel": 65, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.PREEMPTION": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", }, "iMC.PREEMPTION.RD_PREEMPT_RD": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. 
This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.PREEMPTION.RD_PREEMPT_WR": { "Box": "iMC", "Category": "iMC PREEMPTION Events", "Counters": "0-3", "Defn": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.", "Desc": "Read Preemption Count", "EvSel": 8, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PRE_COUNT": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", }, "iMC.PRE_COUNT.BYP": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxx1xxxx", }, "iMC.PRE_COUNT.RD": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.PRE_COUNT.WR": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.PRE_COUNT.PAGE_CLOSE": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this 
channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.PRE_COUNT.PAGE_MISS": { "Box": "iMC", "Category": "iMC PRE Events", "Counters": "0-3", "Defn": "Counts the number of DRAM Precharge commands sent on this channel.", "Desc": "DRAM Precharge commands.", "EvSel": 2, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_PRIO": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", }, "iMC.RD_CAS_PRIO.HIGH": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.RD_CAS_PRIO.LOW": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.RD_CAS_PRIO.MED": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.RD_CAS_PRIO.PANIC": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "EvSel": 160, "ExtSel": "", "Umask": "bxxxx1xxx", }, "iMC.RD_CAS_RANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", }, "iMC.RD_CAS_RANK0.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK0.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK0.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK0.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK0.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", 
"Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK0.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK0.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK0.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK0.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK0.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK0.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK0.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK0.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK0.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK0.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK0.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS 
Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK0.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK0.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK0.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK0.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK0.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 0", "EvSel": 176, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", }, "iMC.RD_CAS_RANK1.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK1.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK1.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK1.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK1.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", 
"Umask": "b00001011", }, "iMC.RD_CAS_RANK1.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK1.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK1.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK1.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK1.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK1.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK1.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK1.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK1.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK1.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK1.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001100", 
}, "iMC.RD_CAS_RANK1.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK1.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK1.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK1.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK1.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 1", "EvSel": 177, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", }, "iMC.RD_CAS_RANK2.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK2.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK2.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK2.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK2.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK2.BANK1": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK2.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK2.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK2.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK2.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK2.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK2.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK2.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK2.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK2.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK2.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK2.BANK5": { "Box": "iMC", "Category": "iMC CAS 
Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK2.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK2.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK2.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK2.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 2", "EvSel": 178, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", }, "iMC.RD_CAS_RANK3.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK3.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK3.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK3.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK3.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK3.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to 
Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK3.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK3.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK3.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK3.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK3.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK3.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK3.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK3.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK3.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK3.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK3.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, 
"ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK3.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK3.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK3.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK3.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 3", "EvSel": 179, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", }, "iMC.RD_CAS_RANK4.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK4.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK4.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK4.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK4.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK4.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001101", }, 
"iMC.RD_CAS_RANK4.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK4.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK4.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK4.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK4.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK4.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK4.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK4.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK4.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK4.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK4.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00000011", }, 
"iMC.RD_CAS_RANK4.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK4.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK4.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK4.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 4", "EvSel": 180, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", }, "iMC.RD_CAS_RANK5.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK5.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK5.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK5.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK5.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK5.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK5.BANK8": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK5.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK5.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK5.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK5.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK5.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK5.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK5.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK5.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK5.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK5.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK5.BANKG0": { "Box": "iMC", "Category": "iMC CAS 
Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK5.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK5.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK5.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 5", "EvSel": 181, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", }, "iMC.RD_CAS_RANK6.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK6.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK6.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK6.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000111", }, "iMC.RD_CAS_RANK6.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK6.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK6.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to 
Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000010", }, "iMC.RD_CAS_RANK6.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK6.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK6.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK6.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK6.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK6.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK6.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK6.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001000", }, "iMC.RD_CAS_RANK6.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK6.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK6.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 
182, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK6.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK6.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK6.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 6", "EvSel": 182, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", }, "iMC.RD_CAS_RANK7.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001011", }, "iMC.RD_CAS_RANK7.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000110", }, "iMC.RD_CAS_RANK7.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000000", }, "iMC.RD_CAS_RANK7.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000100", }, "iMC.RD_CAS_RANK7.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001101", }, "iMC.RD_CAS_RANK7.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001010", }, "iMC.RD_CAS_RANK7.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001000", }, 
"iMC.RD_CAS_RANK7.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001110", }, "iMC.RD_CAS_RANK7.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000001", }, "iMC.RD_CAS_RANK7.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001111", }, "iMC.RD_CAS_RANK7.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010011", }, "iMC.RD_CAS_RANK7.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010010", }, "iMC.RD_CAS_RANK7.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000011", }, "iMC.RD_CAS_RANK7.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010000", }, "iMC.RD_CAS_RANK7.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010001", }, "iMC.RD_CAS_RANK7.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00010100", }, "iMC.RD_CAS_RANK7.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001001", }, "iMC.RD_CAS_RANK7.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000111", }, 
"iMC.RD_CAS_RANK7.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000101", }, "iMC.RD_CAS_RANK7.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00001100", }, "iMC.RD_CAS_RANK7.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "RD_CAS Access to Rank 7", "EvSel": 183, "ExtSel": "", "Umask": "b00000010", }, "iMC.RPQ_CYCLES_FULL": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.", "Desc": "Read Pending Queue Full Cycles", "EvSel": 18, "ExtSel": "", }, "iMC.RPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. 
This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.", "Desc": "Read Pending Queue Not Empty", "EvSel": 17, "ExtSel": "", }, "iMC.RPQ_INSERTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.", "Desc": "Read Pending Queue Allocations", "EvSel": 16, "ExtSel": "", }, "iMC.RPQ_OCCUPANCY": { "Box": "iMC", "Category": "iMC RPQ Events", "Counters": "0-3", "Defn": "Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. 
They deallocate after the CAS command has been issued to memory.", "Desc": "Read Pending Queue Occupancy", "EvSel": 128, "ExtSel": "", }, "iMC.WMM_TO_RMM": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", }, "iMC.WMM_TO_RMM.STARVE": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxxx1x", }, "iMC.WMM_TO_RMM.VMSE_RETRY": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxx1xx", }, "iMC.WMM_TO_RMM.LOW_THRESH": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Transition from WMM to RMM because of low threshold", "EvSel": 192, "ExtSel": "", "Umask": "bxxxxxxx1", }, "iMC.WPQ_CYCLES_FULL": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.", "Desc": "Write Pending Queue Full Cycles", "EvSel": 34, "ExtSel": "", }, "iMC.WPQ_CYCLES_NE": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. 
Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.", "Desc": "Write Pending Queue Not Empty", "EvSel": 33, "ExtSel": "", }, "iMC.WPQ_INSERTS": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have \"posted\" to the iMC.", "Desc": "Write Pending Queue Allocations", "EvSel": 32, "ExtSel": "", }, "iMC.WPQ_READ_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. 
Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 35, "ExtSel": "", }, "iMC.WPQ_WRITE_HIT": { "Box": "iMC", "Category": "iMC WPQ Events", "Counters": "0-3", "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.", "Desc": "Write Pending Queue CAM Match", "EvSel": 36, "ExtSel": "", }, "iMC.WRONG_MM": { "Box": "iMC", "Category": "iMC MAJOR_MODES Events", "Counters": "0-3", "Desc": "Not getting the requested Major Mode", "EvSel": 193, "ExtSel": "", }, "iMC.WR_CAS_RANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", }, "iMC.WR_CAS_RANK0.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK0.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK0.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK0.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK0.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": 
"0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK0.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK0.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK0.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK0.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK0.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK0.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK0.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK0.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK0.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK0.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK0.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS 
Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK0.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK0.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK0.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK0.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK0.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 0", "EvSel": 184, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", }, "iMC.WR_CAS_RANK1.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK1.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK1.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK1.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK1.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", 
"Umask": "b00001011", }, "iMC.WR_CAS_RANK1.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK1.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK1.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK1.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK1.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK1.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK1.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK1.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK1.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK1.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK1.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000010", }, 
"iMC.WR_CAS_RANK1.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK1.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK1.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK1.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK1.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 1", "EvSel": 185, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", }, "iMC.WR_CAS_RANK2.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK2.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK2.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK2.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK2.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK2.BANK8": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK2.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK2.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK2.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK2.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK2.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK2.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK2.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK2.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK2.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK2.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK2.BANK9": { "Box": "iMC", "Category": "iMC CAS 
Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK2.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK2.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK2.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK2.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 2", "EvSel": 186, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", }, "iMC.WR_CAS_RANK3.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK3.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK3.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK3.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK3.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK3.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access 
to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK3.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK3.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK3.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK3.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK3.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK3.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK3.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK3.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK3.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK3.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK3.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, 
"ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK3.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK3.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK3.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK3.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 3", "EvSel": 187, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", }, "iMC.WR_CAS_RANK4.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK4.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK4.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK4.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK4.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK4.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000111", }, 
"iMC.WR_CAS_RANK4.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK4.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK4.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK4.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK4.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK4.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK4.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK4.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK4.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK4.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK4.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001101", }, 
"iMC.WR_CAS_RANK4.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK4.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK4.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK4.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 4", "EvSel": 188, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", }, "iMC.WR_CAS_RANK5.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK5.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK5.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK5.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK5.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK5.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK5.BANK11": { "Box": "iMC", 
"Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK5.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK5.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK5.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK5.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK5.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK5.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK5.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK5.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK5.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK5.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK5.BANK3": { "Box": "iMC", "Category": "iMC CAS 
Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK5.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK5.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK5.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 5", "EvSel": 189, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", }, "iMC.WR_CAS_RANK6.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK6.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK6.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK6.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK6.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK6.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK6.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access 
to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001011", }, "iMC.WR_CAS_RANK6.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK6.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000100", }, "iMC.WR_CAS_RANK6.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK6.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010100", }, "iMC.WR_CAS_RANK6.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK6.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK6.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK6.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK6.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK6.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK6.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 
190, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK6.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK6.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK6.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 6", "EvSel": 190, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", }, "iMC.WR_CAS_RANK7.BANK2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000010", }, "iMC.WR_CAS_RANK7.BANK5": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000101", }, "iMC.WR_CAS_RANK7.BANK12": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001100", }, "iMC.WR_CAS_RANK7.BANK9": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001001", }, "iMC.WR_CAS_RANK7.BANK7": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000111", }, "iMC.WR_CAS_RANK7.BANKG0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010001", }, "iMC.WR_CAS_RANK7.BANKG3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010100", }, 
"iMC.WR_CAS_RANK7.ALLBANKS": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010000", }, "iMC.WR_CAS_RANK7.BANK3": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000011", }, "iMC.WR_CAS_RANK7.BANKG1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010010", }, "iMC.WR_CAS_RANK7.BANKG2": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00010011", }, "iMC.WR_CAS_RANK7.BANK15": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001111", }, "iMC.WR_CAS_RANK7.BANK1": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000001", }, "iMC.WR_CAS_RANK7.BANK8": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001000", }, "iMC.WR_CAS_RANK7.BANK14": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001110", }, "iMC.WR_CAS_RANK7.BANK10": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001010", }, "iMC.WR_CAS_RANK7.BANK13": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001101", }, "iMC.WR_CAS_RANK7.BANK4": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000100", }, 
"iMC.WR_CAS_RANK7.BANK0": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000000", }, "iMC.WR_CAS_RANK7.BANK6": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00000110", }, "iMC.WR_CAS_RANK7.BANK11": { "Box": "iMC", "Category": "iMC CAS Events", "Counters": "0-3", "Desc": "WR_CAS Access to Rank 7", "EvSel": 191, "ExtSel": "", "Umask": "b00001011", }, } derived = { # iMC: "iMC.MEM_BW_READS": { "Box": "iMC", "Category": "iMC PRE Events", "Defn": "Memory bandwidth consumed by reads. Expressed in bytes.", "Desc": "Read Memory Bandwidth", "Equation": "(CAS_COUNT.RD * 64)", }, "iMC.MEM_BW_TOTAL": { "Box": "iMC", "Category": "iMC PRE Events", "Defn": "Total memory bandwidth. Expressed in bytes.", "Desc": "Total Memory Bandwidth", "Equation": "MEM_BW_READS + MEM_BW_WRITES", }, "iMC.MEM_BW_WRITES": { "Box": "iMC", "Category": "iMC PRE Events", "Defn": "Memory bandwidth consumed by writes Expressed in bytes.", "Desc": "Write Memory Bandwidth", "Equation": "(CAS_COUNT.WR * 64)", }, "iMC.PCT_CYCLES_CRITICAL_THROTTLE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in critical thermal throttling", "Desc": "Percent Cycles Critical Throttle", "Equation": "POWER_CRITICAL_THROTTLE_CYCLES / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DLLOFF": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in CKE slow (DLOFF) mode", "Desc": "Percent Cycles DLOFF", "Equation": "POWER_CHANNEL_DLLOFF / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_DRAM_RANKx_IN_CKE": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in CKE ON mode.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_CKE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, 
"iMC.PCT_CYCLES_DRAM_RANKx_IN_THR": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles DRAM rank (x) spent in thermal throttling.", "Desc": "Percent Cycles DRAM Rank x in CKE", "Equation": "POWER_THROTTLE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_PPD": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles all DRAM ranks in PPD mode", "Desc": "Percent Cycles PPD", "Equation": "POWER_CHANNEL_PPD / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_CYCLES_SELF_REFRESH": { "Box": "iMC", "Category": "iMC POWER Events", "Defn": "The percentage of cycles Memory is in self refresh power mode", "Desc": "Percent Cycles Self Refresh", "Equation": "POWER_SELF_REFRESH / MC_Chy_PCI_PMON_CTR_FIXED", }, "iMC.PCT_RD_REQUESTS": { "Box": "iMC", "Category": "iMC RPQ Events", "Defn": "Percentage of read requests from total requests.", "Desc": "Percent Read Requests", "Equation": "RPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)", }, "iMC.PCT_REQUESTS_PAGE_EMPTY": { "Box": "iMC", "Category": "iMC ACT Events", "Defn": "Percentage of memory requests that resulted in Page Empty", "Desc": "Percent Requests Page Empty", "Equation": "(ACT_COUNT - PRE_COUNT.PAGE_MISS)/ (CAS_COUNT.RD + CAS_COUNT.WR)", }, "iMC.PCT_REQUESTS_PAGE_HIT": { "Box": "iMC", "Category": "iMC ACT Events", "Defn": "Percentage of memory requests that resulted in Page Hits", "Desc": "Percent Requests Page Hit", "Equation": "1 - (PCT_REQUESTS_PAGE_EMPTY + PCT_REQUESTS_PAGE_MISS)", }, "iMC.PCT_REQUESTS_PAGE_MISS": { "Box": "iMC", "Category": "iMC PRE Events", "Defn": "Percentage of memory requests that resulted in Page Misses", "Desc": "Percent Requests Page Miss", "Equation": "PRE_COUNT.PAGE_MISS / (CAS_COUNT.RD + CAS_COUNT.WR)", }, "iMC.PCT_WR_REQUESTS": { "Box": "iMC", "Category": "iMC WPQ Events", "Defn": "Percentage of write requests from total requests.", "Desc": "Percent Write Requests", "Equation": "WPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)", }, # rxl_hdr: 
"rxl_hdr.AVG_DEMAND_RD_MISS_REMOTE_LATENCY": { "Box": "rxl_hdr", "Category": "rxl_hdr TOR Events", "Defn": "Average Latency of Data Reads from an iA Core that miss the LLC and were satsified by a Remote cache or Remote Memory", "Desc": "Average Data Read Remote Miss Latency", "Equation": "(TOR_OCCUPANCY.IA_MISS / TOR_INSERTS.IA_MISS) with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x202,1,1,0,0,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, # UPI_LL: "DRS_E_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL CTO Events", "Defn": "DRS response in F or E states received from UPI in bytes. To calculate the total data response for each cache line state, it's necessary to add the contribution from three flavors {DataC, DataC_FrcAckCnflt, DataC_Cmp} of data response packets for each cache line state.", "Desc": "DRS Data in F or E From UPI", "Equation": "RxL_BASIC_HDR_MATCH with:{umask=0x1c,opc=1} * 64", }, "UPI_LL.DRS_M_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL CTO Events", "Defn": "Data Response DataM packets received from UPI. Expressed in bytes", "Desc": "DRS Data_Ms From UPI", "Equation": "RxL_BASIC_HDR_MATCH with:{umask=0x0C,opc=1} * 64", }, "UPI_LL.DRS_WB_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL CTO Events", "Defn": "DRS writeback packets received from UPI in bytes. 
This is the sum of Wb{I,S,E} DRS packets", "Desc": "DRS Writeback From UPI", "Equation": "DRS_WbI_FROM_UPI + DRS_WbS_FROM_UPI + DRS_WbE_FROM_UPI", }, "UPI_LL.DRS_WbE_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL CTO Events", "Defn": "DRS writeback 'change M to E state' packets received from UPI in bytes", "Desc": "DRS WbE From UPI", "Equation": "RxL_BASIC_HDR_MATCH with:{umask=0x2d,opc=1} *64", }, "UPI_LL.DRS_WbI_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL CTO Events", "Defn": "DRS writeback 'change M to I state' packets received from UPI in bytes", "Desc": "DRS WbI From UPI", "Equation": "RxL_BASIC_HDR_MATCH with:{umask=0x0D,opc=1} *64", }, "UPI_LL.DRS_WbS_FROM_UPI": { "Box": "UPI_LL", "Category": "UPI_LL CTO Events", "Defn": "DRS writeback 'change M to S state' packets received from UPI in bytes", "Desc": "DRS WbSFrom UPI", "Equation": "RxL_BASIC_HDR_MATCH with:{umask=0x1D,opc=1} *64", }, "UPI_LL.NCB_DATA_FROM_UPI_TO_NODEx": { "Box": "UPI_LL", "Category": "UPI_LL CTO Events", "Defn": "NCB Data packets (Any - Interrupts) received from UPI sent to Node ID 'x'. 
Expressed in bytes", "Desc": "NCB Data From UPI To Node x", "Equation": "RxL_BASIC_HDR_MATCH with:{umask=0xE,endnid=1,dnid=x} * 64", }, "UPI_LL.PCT_LINK_CRC_RETRY_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL CRC_ERRORS_RX Events", "Defn": "Percent of Cycles the UPI link layer is in retry mode due to CRC errors", "Desc": "Percent Link CRC Retry Cycles", "Equation": "RxL_CRC_CYCLES_IN_LLR / CLOCKTICKS", }, "UPI_LL.PCT_LINK_FULL_POWER_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_RX Events", "Defn": "Percent of Cycles the UPI link is at Full Power", "Desc": "Percent Link Full Power Cycles", "Equation": "RxL0_POWER_CYCLES / CLOCKTICKS", }, "UPI_LL.PCT_LINK_HALF_DISABLED_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER_RX Events", "Defn": "Percent of Cycles the UPI link in power mode where 60% of the lanes are disabled.", "Desc": "Percent Link Half Disabled Cycles", "Equation": "RxL0P_POWER_CYCLES / CLOCKTICKS", }, "UPI_LL.PCT_LINK_SHUTDOWN_CYCLES": { "Box": "UPI_LL", "Category": "UPI_LL POWER Events", "Defn": "Percent of Cycles the UPI link is Shutdown", "Desc": "Percent Link Shutdown Cycles", "Equation": "L1_POWER_CYCLES / CLOCKTICKS", }, # CHA: "CHA.AVG_CRD_MISS_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of Code Reads from an iA Core that miss the LLC", "Desc": "Average Code Read Latency", "Equation": "(TOR_OCCUPANCY.IA_MISS / TOR_INSERTS.IA_MISS) with:Cn_MSR_PMON_BOX_FILTER1.{opc1,opc0,not_nm,nm,all_opc,loc,rem}={0x259,0x201,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.AVG_DEMAND_RD_HIT_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of Data Reads that hit the LLC", "Desc": "Average Data Read Hit Latency", "Equation": "TOR_OCCUPANCY.ALL_HIT / (TOR_INSERTS.ALL_HIT with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x202,1,1,0,1,1})", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.AVG_DEMAND_RD_MISS_LOCAL_LATENCY": { "Box": "CHA", 
"Category": "CHA TOR Events", "Defn": "Average Latency of Data Reads from an IA Core that miss the LLC and were satsified by Local Memory", "Desc": "Average Data Read Local Miss Latency", "Equation": "(TOR_OCCUPANCY.IA_MISS / TOR_INSERTS.IA_MISS) with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x202,1,1,0,1,0}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.AVG_DRD_MISS_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of Data Reads or Data Read Prefetches from an IA Core that miss the LLC", "Desc": "Average Data Read Miss Latency", "Equation": "(TOR_OCCUPANCY.IA_MISS / TOR_INSERTS.IA_MISS) with:Cn_MSR_PMON_BOX_FILTER1.{opc1, opc0,not_nm,nm,all_opc,loc,rem}={0x25A,0x202,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.AVG_IA_CRD_LLC_HIT_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of Code Reads from an iA Core that miss the LLC", "Desc": "Average Code Read Latency", "Equation": "(TOR_OCCUPANCY.IA_HIT / TOR_INSERTS.IA_HIT) with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x201,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.AVG_INGRESS_DEPTH": { "Box": "CHA", "Category": "CHA INGRESS Events", "Defn": "Average Depth of the Ingress Queue through the sample interval", "Desc": "Average Ingress (from CMS) Depth", "Equation": "RxC_OCCUPANCY.IRQ / SAMPLE_INTERVAL", }, "CHA.AVG_INGRESS_LATENCY": { "Box": "CHA", "Category": "CHA INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks", "Desc": "Average Ingress (from CMS) Latency", "Equation": "RxC_OCCUPANCY.IRQ / RxC_INSERTS.IRQ", }, "CHA.AVG_INGRESS_LATENCY_WHEN_NE": { "Box": "CHA", "Category": "CHA INGRESS Events", "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks when Ingress Queue has at least one entry", "Desc": "Average Latency in Non-Empty Ingress (from CMS)", "Equation": 
"RxC_OCCUPANCY.IRQ / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}", }, "CHA.AVG_RFO_MISS_LATENCY": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Latency of RFOs from an iA Core that miss the LLC", "Desc": "Average RFO Latency", "Equation": "(TOR_OCCUPANCY.IA_MISS / TOR_INSERTS.IA_MISS) with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x200,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.AVG_TOR_DRDS_MISS_WHEN_NE": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Number of Data Read Entries that Miss the LLC when the TOR is not empty.", "Desc": "Average Data Read Misses in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.ALL_MISS / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x202,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.AVG_TOR_DRDS_WHEN_NE": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Average Number of Data Read Entries when the TOR is not empty.", "Desc": "Average Data Reads in Non-Empty TOR", "Equation": "(TOR_OCCUPANCY.ALL / COUNTER0_OCCUPANCY{edge_det,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x202,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.CYC_INGRESS_BLOCKED": { "Box": "CHA", "Category": "CHA INGRESS Events", "Defn": "Cycles the Ingress Request Queue arbiter was Blocked", "Desc": "Cycles Ingress (from CMS) Blocked", "Equation": "RxC_EXT_STARVED.IRQ / SAMPLE_INTERVAL", }, "CHA.FAST_STR_LLC_HIT": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of ItoM (fast string) operations that reference the LLC", "Desc": "Fast String operations", "Equation": "TOR_INSERTS.IA_HIT with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x248,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.FAST_STR_LLC_MISS": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of ItoM (fast string) operations that 
miss the LLC", "Desc": "Fast String misses", "Equation": "TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x248,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.INGRESS_REJ_V_INS": { "Box": "CHA", "Category": "CHA INGRESS Events", "Defn": "Ratio of Ingress Request Entries that were rejected vs. inserted", "Desc": "Ingress (from CMS) Rejects vs. Inserts", "Equation": "RxC_INSERTS.IRQ_REJECTED / RxC_INSERTS.IRQ", }, "CHA.LLC_CRD_MISS_TO_LOC_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC Code Read and Code Prefetch misses satisfied by local memory.", "Desc": "LLC Code Read Misses to Local Memory", "Equation": "TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{opc1,opc0,not_nm,nm,all_opc,loc,rem}={0x259,0x201,1,1,0,1,0}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.LLC_CRD_MISS_TO_REM_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC Code Read and Code Read Prefetch misses satisfied by a remote cache or remote memory.", "Desc": "LLC Code Read Misses to Remote Memory", "Equation": "TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{opc1,opc0,not_nm,nm,all_opc,loc,rem}={0x259,0x201,1,1,0,0,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.LLC_DRD_MISS_PCT": { "Box": "CHA", "Category": "CHA CACHE Events", "Desc": "LLC DRd Miss Percentage", "Equation": "LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER0.state=0x1 / LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER0.state=0xF1", "Filter": "CHAFilter0[26:17]", }, "CHA.LLC_DRD_MISS_TO_LOC_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC Data Read and Data Prefetch misses satisfied by local memory.", "Desc": "LLC Data Read Misses to Local Memory", "Equation": "TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{opc1,opc0,not_nm,nm,all_opc,loc,rem}={0x25A,0x202,1,1,0,1,0}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, 
"CHA.LLC_DRD_MISS_TO_REM_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC Data Read and Data Prefetch misses satisfied by a remote cache or remote memory.", "Desc": "LLC Data Read Misses to Remote Memory", "Equation": "TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{opc1,opc0,not_nm,nm,all_opc,loc,rem}={0x25A,0x202,1,1,0,0,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.LLC_DRD_PREFETCH_HITS": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "DRd Prefetches that Hit the LLC", "Equation": "TOR_INSERTS.IA_HIT with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x25A,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.LLC_DRD_PREFETCH_MISSES": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "DRd Prefetches that Missed the LLC", "Equation": "TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x25A,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.LLC_IA_CRD_HITS": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC Code Read and Code Prefetch misses satisfied by local memory.", "Desc": "LLC Code Read Misses to Local Memory", "Equation": "TOR_INSERTS.IA_HIT with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x201,1,1,0,1,0}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.LLC_PCIE_DATA_BYTES": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC write miss (disk/network reads) bandwidth in MB", "Desc": "LLC Miss Data from PCIe", "Equation": "TOR_INSERTS.IO with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x248,1,1,0,1,1} * 64", "Filter": "CHAFilter0[8:0], (CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.LLC_RFO_MISS_PCT": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC RFO Miss Ratio", "Desc": "LLC RFO Miss Ratio", "Equation": "(TOR_INSERTS.ALL_MISS / TOR_INSERTS.ALL) 
with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x200,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.LLC_RFO_MISS_TO_LOC_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC RFO and RFO Prefetch misses satisfied by local memory.", "Desc": "LLC RFO Misses to Local Memory", "Equation": "TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{opc1,opc0,not_nm,nm,all_opc,loc,rem}={0x258,0x200,1,1,0,1,0}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.LLC_RFO_MISS_TO_REM_MEM": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "LLC RFO and RFO Prefetch misses satisfied by a remote cache or remote memory.", "Desc": "LLC RFO Misses to Remote Memory", "Equation": "TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{opc1,opc0,not_nm,nm,all_opc,loc,rem}={0x258,0x200,1,1,0,0,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.LLC_RFO_PREFETCH_HITS": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "RFO Prefetches that Hit the LLC", "Equation": "TOR_INSERTS.IA_HIT with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x258,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.LLC_RFO_PREFETCH_MISSES": { "Box": "CHA", "Category": "CHA TOR Events", "Desc": "RFO Prefetches that Missed the LLC", "Equation": "TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x258,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9]), CHAFilter1[1:0]", }, "CHA.MMIO_READ_BW": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "IO Read Bandwidth in MB - Disk or Network Reads", "Desc": "IO Read Bandwidth", "Equation": "(TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{nc,opc0,not_nm,nm,all_opc,loc,rem}={1,0x20E,1,1,0,1,1}) * 64 / 1000000", "Filter": "CHAFilter0[8:0], (CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.MMIO_WRITE_BW": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "IO Write 
Bandwidth in MB - Disk or Network Writes", "Desc": "IO Write Bandwidth", "Equation": "(TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{nc,opc0,not_nm,nm,all_opc,loc,rem}={1,0x20F,1,1,0,1,1}) * 64 / 1000000", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.PCIE_FULL_WRITES": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of full PCI writes", "Desc": "PCIe Data Traffic", "Equation": "TOR_INSERTS.IO with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x248,1,1,0,1,1}", "Filter": "CHAFilter0[8:0], (CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.PCI_PARTIAL_WRITES": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of partial PCI writes", "Desc": "Partial PCI Writes", "Equation": "TOR_INSERTS.IO with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x200,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.PCI_READS": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of PCI reads", "Desc": "Partial PCI Reads", "Equation": "TOR_INSERTS.IO with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x21E,1,1,0,1,1}", "Filter": "CHAFilter0[8:0], (CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.PCT_RD_REQUESTS": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Percentage of HA traffic that is from Read Requests", "Desc": "Percent Read Requests", "Equation": "REQUESTS.READS / (REQUESTS.READS + REQUESTS.WRITES)", }, "CHA.PCT_WR_REQUESTS": { "Box": "CHA", "Category": "CHA HA REQUEST Events", "Defn": "Percentage of HA traffic that is from Write Requests", "Desc": "Percent Write Requests", "Equation": "REQUESTS.WRITES / (REQUESTS.READS + REQUESTS.WRITES)", }, "CHA.STREAMED_FULL_STORES": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of Streamed Store (of Full Cache Line) Transactions", "Desc": "Streaming Stores (Full Line)", "Equation": "TOR_INSERTS.IA with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x20C,1,1,0,1,1}", "Filter": 
"(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.STREAMED_PART_STORES": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Number of Streamed Store (of Partial Cache Line) Transactions", "Desc": "Streaming Stores (Partial Line)", "Equation": "TOR_INSERTS.IA with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x20D,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, "CHA.UC_READS": { "Box": "CHA", "Category": "CHA TOR Events", "Defn": "Uncachable Read Transactions", "Desc": "Uncacheable Reads", "Equation": "TOR_INSERTS.IA_MISS with:Cn_MSR_PMON_BOX_FILTER1.{opc0,not_nm,nm,all_opc,loc,rem}={0x207,1,1,0,1,1}", "Filter": "(CHAFilter1[28:19] | CHAFilter1[18:9])", }, # PCU: "PCU.PCT_CYC_FREQ_CURRENT_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by current", "Desc": "Percent Frequency Current Limited", "Equation": "FREQ_MAX_CURRENT_CYCLES / CLOCKTICKS", }, "PCU.PCT_CYC_FREQ_POWER_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by power", "Desc": "Percent Frequency Power Limited", "Equation": "FREQ_MAX_POWER_CYCLES / CLOCKTICKS", }, "PCU.PCT_CYC_FREQ_THERMAL_LTD": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by thermal issues", "Desc": "Percent Frequency Thermal Limited", "Equation": "FREQ_MAX_CURRENT_CYCLES / CLOCKTICKS", }, "PCU.s": { "Box": "PCU", "Category": "PCU FREQ_MAX_LIMIT Events", "Defn": "Percentage of Cycles the Max Frequency is limited by the OS", "Desc": "Percent Frequency OS Limited", "Equation": "FREQ_MAX_OS_CYCLES / CLOCKTICKS", }, } categories = ( "CHA CACHE Events", "CHA CMS Horizontal EGRESS Events", "CHA CMS Transgress Credit Events", "CHA CMS Transgress INGRESS Events", "CHA CMS Vertical EGRESS Events", "CHA HA BYPASS Events", "CHA HA DIRECTORY Events", "CHA HA HitME Events", "CHA HA HitME Pipe Events", 
"CHA HA IODC Events", "CHA HA OSB Events", "CHA HA READ WRITE Events", "CHA HA REQUEST Events", "CHA HA SNOOP RESPONSE Events", "CHA HA WBPUSHMTOI Events", "CHA Horizontal RING Events", "CHA Horizontal In Use RING Events", "CHA INGRESS Events", "CHA INGRESS_RETRY Events", "CHA ISMQ Events", "CHA MISC Events", "CHA Misc Events", "CHA OCCUPANCY Events", "CHA TOR Events", "CHA UCLK Events", "CHA Vertical In Use RING Events", "CHA Vertical RING Events", "IIO CLOCK Events", "IIO Debug Events", "IIO Link Events", "IIO Miscellaneous Events", "IIO PCIe Completion Buffer Events", "IIO Payload Events", "IIO Transaction Events", "IIO VTd Events", "IRP AK Egress Events", "IRP BL Egress Events", "IRP CLOCK Events", "IRP Coherency Events", "IRP FAF Events", "IRP IRP Buffer Events", "IRP MISC Events", "IRP OUTBOUND_REQUESTS Events", "IRP P2P Events", "IRP STALL_CYCLES Events", "IRP TRANSACTIONS Events", "IRP WRITE_CACHE Events", "M2M AD CMS/Mesh Egress Credit Events", "M2M AD Egress Events", "M2M AD Ingress Events", "M2M AK CMS/Mesh Egress Credit Events", "M2M AK Egress Events", "M2M BL CMS/Mesh Egress Credit Events", "M2M BL Egress Events", "M2M BL Ingress Events", "M2M CAM Prefetch Events", "M2M CMS Horizontal EGRESS Events", "M2M CMS Transgress Credit Events", "M2M CMS Transgress INGRESS Events", "M2M CMS Vertical EGRESS Events", "M2M DIRECT2CORE Events", "M2M DIRECT2UPI Events", "M2M DIRECTORY Events", "M2M Directory State Events", "M2M Horizontal RING Events", "M2M Horizontal In Use RING Events", "M2M IMC Events", "M2M Misc Events", "M2M OUTBOUND_TX Events", "M2M PACKET MATCH Events", "M2M RPQ CREDIT Events", "M2M TRACKER Events", "M2M Transgress/M2MIngress Credit Events", "M2M UCLK Events", "M2M Vertical In Use RING Events", "M2M Vertical RING Events", "M2M WPQ_CREDITS Events", "M2PCIe CMS Horizontal EGRESS Events", "M2PCIe CMS Transgress Credit Events", "M2PCIe CMS Transgress INGRESS Events", "M2PCIe CMS Vertical EGRESS Events", "M2PCIe EGRESS Events", "M2PCIe Horizontal 
RING Events", "M2PCIe Horizontal In Use RING Events", "M2PCIe IIO_CREDITS Events", "M2PCIe INGRESS Events", "M2PCIe Misc Events", "M2PCIe UCLK Events", "M2PCIe Vertical In Use RING Events", "M2PCIe Vertical RING Events", "M3UPI ARB Events", "M3UPI CMS Horizontal EGRESS Events", "M3UPI CMS Transgress Credit Events", "M3UPI CMS Transgress INGRESS Events", "M3UPI CMS Vertical EGRESS Events", "M3UPI EGRESS Credit Events", "M3UPI FlowQ Events", "M3UPI Horizontal RING Events", "M3UPI Horizontal In Use RING Events", "M3UPI INGRESS Arbitration Events", "M3UPI INGRESS Credit Events", "M3UPI INGRESS Events", "M3UPI INGRESS Flit Events", "M3UPI INGRESS Sloting Events", "M3UPI Link VN Credit Events", "M3UPI Misc Events", "M3UPI Special Egress Events", "M3UPI UCLK Events", "M3UPI Vertical In Use RING Events", "M3UPI Vertical RING Events", "PCU CORE_C_STATE_TRANSITION Events", "PCU FIVR Events", "PCU FREQ_MAX_LIMIT Events", "PCU FREQ_MIN_LIMIT Events", "PCU FREQ_TRANS Events", "PCU MEMORY_PHASE_SHEDDING Events", "PCU Misc Events", "PCU PKG_C_STATE_RESIDENCY Events", "PCU PROCHOT Events", "PCU VR_HOT Events", "UBOX EVENT_MSG Events", "UBOX LOCK Events", "UBOX PHOLD Events", "UBOX RACU Events", "UPI_LL CFCLK Events", "UPI_LL CRC_ERRORS_RX Events", "UPI_LL CTO Events", "UPI_LL DIRECT2CORE Events", "UPI_LL FLIT match Events", "UPI_LL Flit Events", "UPI_LL LL to M3 Events", "UPI_LL POWER Events", "UPI_LL POWER_RX Events", "UPI_LL POWER_TX Events", "UPI_LL RXQ Events", "UPI_LL RX_CREDITS_CONSUMED Events", "UPI_LL TXQ Events", "UPI_LL VNA_CREDIT_RETURN Events", "iMC ACT Events", "iMC BYPASS Command Events", "iMC CAS Events", "iMC DCLK Events", "iMC DRAM_PRE_ALL Events", "iMC DRAM_REFRESH Events", "iMC ECC Events", "iMC MAJOR_MODES Events", "iMC POWER Events", "iMC PRE Events", "iMC PREEMPTION Events", "iMC RPQ Events", "iMC WPQ Events", "rxl_hdr TOR Events", );
1,242,861
Python
.py
26,676
36.707115
843
0.568509
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,964
bdx_extra.py
andikleen_pmu-tools/ucevent/bdx_extra.py
extra_derived = { # CBO "CBO.LLC_PCIE_MEM_WRITE_BYTES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC Miss Data from PCIe to memory written", "Desc": "LLC Miss Data from PCIe to memory written", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19e * 64", }, "CBO.LLC_PCIE_MEM_READ_BYTES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC Miss Data from PCIe read from memory", "Desc": "LLC Miss Data from PCIe read from memory", "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19c * 64", }, "CBO.LLC_PCIE_MEM_TOTAL_BYTES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC Miss Data from PCIe read from memory", "Desc": "LLC Miss Data from PCIe read from memory", "Equation": "LLC_PCIE_MEM_READ_BYTES + LLC_PCIE_MEM_WRITE_BYTES" }, "CBO.LLC_DDIO_MEM_WRITE_BYTES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC Miss Data from DDIO to memory written", "Desc": "LLC Miss Data from DDIO to memory written", "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19e * 64", }, "CBO.LLC_DDIO_MEM_READ_BYTES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC Miss Data from PCIe read from memory", "Desc": "LLC Miss Data from PCIe read from memory", "Equation": "TOR_INSERTS.MISS_OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19c * 64", }, "CBO.LLC_DDIO_MEM_TOTAL_BYTES": { "Box": "CBO", "Category": "CBO TOR Events", "Defn": "LLC Miss Data from DDIO read from memory", "Desc": "LLC Miss Data from DDIO read from memory", "Equation": "LLC_DDIO_MEM_READ_BYTES + LLC_DDIO_MEM_WRITE_BYTES" }, "CBO.AVG_LLC_DATA_READ_MISS_LATENCY": { "Box": "CBO", "Category": "CBO CACHE Events", "Defn": "Average LLC data read (demand+prefetch) miss latency (core clocks)", "Desc": "Average LLC data read (demand+prefetch) miss latency (core clocks)", "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182" }, # PCU "PCU.PCT_FREQ_BAND0": { "Box": "PCU", "Category": "PCU 
FREQ_RESIDENCY Events", "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", "Equation": "FREQ_BAND0_CYCLES / CLOCKTICKS" }, "PCU.PCT_FREQ_BAND1": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. 
There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", "Equation": "FREQ_BAND1_CYCLES / CLOCKTICKS" }, "PCU.PCT_FREQ_BAND2": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", "Equation": "FREQ_BAND2_CYCLES / CLOCKTICKS" }, "PCU.PCT_FREQ_BAND3": { "Box": "PCU", "Category": "PCU FREQ_RESIDENCY Events", "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. 
One can also use inversion to track cycles when we were less than the configured frequency.", "Desc": "Frequency Residency", "Notes": "The PMON control registers in the PCU only update on a frequency transition. Changing the measuring threshold during a sample interval may introduce errors in the counts. This is especially true when running at a constant frequency for an extended period of time. There is a corner case here: we set this code on the GV transition. So, if we never GV we will never call this code. This event does not include transition times. It is handled on fast path.", "Equation": "FREQ_BAND3_CYCLES / CLOCKTICKS" }, "QPI_LL.QPI_SPEED": { "Box": "QPI_LL", "Category": "QPI_LL CFCLK Events", "Counters": "0-3", "Defn": "QPI speed - GT/s", "Desc": "QPI speed - GT/s", "Equation": "CLOCKTICKS/NUM_R3QPI*8/1000000000", }, "iMC.DIMM_SPEED": { "Box": "iMC", "Category": "iMC CAS Events", "Defn": "DIMM Speed", "Desc": "DIMM Speed", "Equation": "MC_Chy_PCI_PMON_CTR_FIXED / 2", "Obscure": 1, }, }
7,755
Python
.py
101
67.455446
526
0.668279
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,965
ucaux.py
andikleen_pmu-tools/ucevent/ucaux.py
class Aux: limited_counters = { "r3qpi": 3, "ubox": 2 } filters = ("filter_nid", "mask", "match", "filter_opc", "filter_state") DEFAULT_COUNTERS = 4 MAX_RANK = 8 acronyms = { "TOR": "Table of Requests, pending transactions", "FLIT": "80-bit QPI packet", "RPQ": "Read Queue", "WPQ": "Write Queue", "CBO": "Last Level Cache Slice", "PCU": "Power Control Unit", "iMC": "Memory Controller", "HA": "Home Agent", "QPI_LL": "QPI Link Layer", } qual_alias = { "nid": "filter_nid", "opc": "filter_opc", "all_opc": "filter_all_op", "nm": "filter_nm", "not_nm": "filter_not_nm", "opc0": "filter_opc0", "opc1": "filter_opc1", "loc": "filter_loc", "rem": "filter_rem", "nc": "filter_nc", "Q_Py_PCI_PMON_PKT_MATCH0[12:00]": "match0", "Q_Py_PCI_PMON_PKT_MATCH1[19:16]": "match_rds", "Q_Py_PCI_PMON_PKT_MASK0[12:0]": "mask0", "Q_Py_PCI_PMON_PKT_MASK0[17:0]": "mask0", # why both? "Q_Py_PCI_PMON_PKT_MASK1[19:16]": "mask_rds", "Q_Py_PCI_PMON_PKT_MATCH0": "match0", "Q_Py_PCI_PMON_PKT_z_MATCH0": "match0", "Q_Py_PCI_PMON_PKT_z_MASK0[12:0]": "mask0", # XXX correct? "Q_Py_PCI_PMON_PKT_z_MASK0[17:0]": "mask0", # XXX correct? 
"edge_det": "edge", "Cn_MSR_PMON_BOX_FILTER.opc": "filter_opc", "Cn_MSR_PMON_BOX_FILTER0.opc": "filter_opc", "Cn_MSR_PMON_BOX_FILTER1.opc": "filter_opc", "Cn_MSR_PMON_BOX_FILTER.state": "filter_state", "Cn_MSR_PMON_BOX_FILTER0.state": "filter_state", "Cn_MSR_PMON_BOX_FILTER0.tid": "filter_tid", "Cn_MSR_PMON_BOX_FILTER0.nc": "filter_nc", "Cn_MSR_PMON_BOX_FILTER0.nm": "filter_nm", "Cn_MSR_PMON_BOX_FILTER0.all_opc": "filter_all_op", # XXX "Cn_MSR_PMON_BOX_FILTER0.opc1": "filter_opc1", "Cn_MSR_PMON_BOX_FILTER0.opc0": "filter_opc0", "Cn_MSR_PMON_BOX_FILTER0.loc": "filter_loc", "Cn_MSR_PMON_BOX_FILTER0.not_nm": "filter_not_nm", "Cn_MSR_PMON_BOX_FILTER1.nm": "filter_nm", "Cn_MSR_PMON_BOX_FILTER1.all_opc": "filter_all_op", # XXX "Cn_MSR_PMON_BOX_FILTER1.opc1": "filter_opc1", "Cn_MSR_PMON_BOX_FILTER1.opc0": "filter_opc0", "Cn_MSR_PMON_BOX_FILTER1.loc": "filter_loc", "Cn_MSR_PMON_BOX_FILTER1.not_nm": "filter_not_nm", "Q_Py_PCI_PMON_PKT_MATCH0.dnid": "match_dnid", "Q_Py_PCI_PMON_PKT_z_MATCH0.dnid": "match_dnid", # XXX "Q_Py_PCI_PMON_PKT_z_MATCH1": "match1", "Q_Py_PCI_PMON_PKT_z_MASK1": "mask1", "PCUFilter[7:0]": "filter_band0", "PCUFilter[15:8]": "filter_band1", "PCUFilter[23:16]": "filter_band2", "PCUFilter[31:24]": "filter_band3", "CBoFilter[31:23]": "filter_opc", "CBoFilter[17:10]": "filter_nid", "QPIMatch0[17:0]": "match0", "QPIMask0[17:0]": "mask0", "QPIMatch0[12:0]": "match0", "QPIMask0[12:0]": "mask0", "QPIMask1[19:16]": "mask_rds", "QPIMatch1[19:16]": "match_rds", "CBoFilter[22:18]": "filter_state", } qual_display_alias = { "QPIMask0[12:0]": "mask_mc, match_opc, match_vnw", "QPIMatch0[12:0]": "match_mc, match_opc, match_vnw", "QPIMatch0[17:0]": "match_mc, match_opc, match_vnw, match_dnid", } alias_events = { "MC_Chy_PCI_PMON_CTR_FIXED": "uncore_imc_INDEX/clockticks/" } clockticks = ( r"uncore_(cbox|ha|pcu)_?\d*/event=0x0/", r".*/clockticks/", r"uncore_(r2pcie|r3qpi)_?\d*/event=0x1/", r"uncore_qpi(_\d+)?/event=0x14/" )
3,702
Python
.py
88
33.738636
75
0.550014
andikleen/pmu-tools
1,984
331
178
GPL-2.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,966
setup.py
aerkalov_ebooklib/setup.py
import io import re from setuptools import setup def read(path): with io.open(path, mode="r", encoding="utf-8") as fd: content = fd.read() # Convert Markdown links to reStructuredText links return re.sub(r"\[([^]]+)\]\(([^)]+)\)", r"`\1 <\2>`_", content) setup( name = 'EbookLib', version = '0.18', author = 'Aleksandar Erkalovic', author_email = 'aerkalov@gmail.com', packages = ['ebooklib', 'ebooklib.plugins'], url = 'https://github.com/aerkalov/ebooklib', license = 'GNU Affero General Public License', description = 'Ebook library which can handle EPUB2/EPUB3 and Kindle format', long_description = read('README.md'), long_description_content_type = 'text/markdown', keywords = ['ebook', 'epub', 'kindle'], classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Software Development :: Libraries :: Python Modules" ], install_requires = [ "lxml", "six" ] )
1,359
Python
.py
36
31.944444
81
0.615793
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,967
create.py
aerkalov_ebooklib/samples/09_create_image/create.py
# coding=utf-8 from ebooklib import epub if __name__ == '__main__': book = epub.EpubBook() # add metadata book.set_identifier('image123') book.set_title('Simple book with image') book.set_language('en') book.add_author('Aleksandar Erkalovic') # chapter with image c1 = epub.EpubHtml(title='Chapter with image', file_name='chapter_image.xhtml', lang='en') c1.content=u'''<html> <head></head> <body> <h1>The world famous chapter</h1> <p>Yes, this is the world famous chapter with image!</p> <img src="static/ebooklib.gif"/> </body> </html>''' image_content = open('ebooklib.gif', 'rb').read() img = epub.EpubImage(uid='image_1', file_name='static/ebooklib.gif', media_type='image/gif', content=image_content) # add chapters to the book book.add_item(c1) book.add_item(img) # create table of contents # - add section # - add auto created links to chapters book.toc = (c1, ) # add navigation files book.add_item(epub.EpubNcx()) book.add_item(epub.EpubNav()) # create spine book.spine = ['nav', c1] # create epub file epub.write_epub('test.epub', book, {})
1,188
Python
.py
35
29.057143
119
0.651982
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,968
create.py
aerkalov_ebooklib/samples/05_plugins_create/create.py
# coding=utf-8 from ebooklib import epub from ebooklib.plugins.base import BasePlugin class SamplePlugin(BasePlugin): NAME = 'Sample Plugin' # Very useless but example of what can be done def html_before_write(self, book, chapter): if chapter.content is None: return try: from urlparse import urlparse, urljoin except ImportError: from urllib.parse import urlparse, urljoin from lxml import html, etree utf8_parser = html.HTMLParser(encoding='utf-8') tree = html.document_fromstring(chapter.content, parser=utf8_parser) root = tree.getroottree() if len(root.find('body')) != 0: body = tree.find('body') for _link in body.xpath("//a[@class='test']"): _link.set('href', 'http://www.binarni.net/') chapter.content = etree.tostring(tree, pretty_print=True, encoding='utf-8') if __name__ == '__main__': book = epub.EpubBook() # add metadata book.set_identifier('sample123456') book.set_title('Sample book') book.set_language('en') book.add_author('Aleksandar Erkalovic') # intro chapter c1 = epub.EpubHtml(title='Introduction', file_name='intro.xhtml', lang='en') c1.content=u'<html><head></head><body><h1>Introduction</h1><p>Introduction paragraph <a class="test">with a link</a> where i explain what is happening.</p></body></html>' # about chapter c2 = epub.EpubHtml(title='About this book', file_name='about.xhtml') c2.content='<h1>About this book</h1><p>Helou, this is my book! 
There are many books, but this one is mine.</p>' # add chapters to the book book.add_item(c1) book.add_item(c2) # create table of contents # - add section # - add auto created links to chapters book.toc = (epub.Link('intro.xhtml', 'Introduction', 'intro'), (epub.Section('Languages'), (c1, c2)) ) # add navigation files book.add_item(epub.EpubNcx()) book.add_item(epub.EpubNav()) # define css style style = ''' @namespace epub "http://www.idpf.org/2007/ops"; body { font-family: Cambria, Liberation Serif, Bitstream Vera Serif, Georgia, Times, Times New Roman, serif; } h2 { text-align: left; text-transform: uppercase; font-weight: 200; } ol { list-style-type: none; } ol > li:first-child { margin-top: 0.3em; } nav[epub|type~='toc'] > ol > li > ol { list-style-type:square; } nav[epub|type~='toc'] > ol > li > ol > li { margin-top: 0.3em; } ''' # add css file nav_css = epub.EpubItem(uid="style_nav", file_name="style/nav.css", media_type="text/css", content=style) book.add_item(nav_css) # create spine book.spine = ['nav', c1, c2] opts = {'plugins': [SamplePlugin()]} # create epub file epub.write_epub('test.epub', book, opts)
2,969
Python
.py
80
30.3875
174
0.632624
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,969
create.py
aerkalov_ebooklib/samples/07_pagebreaks/create.py
# coding=utf-8 from ebooklib import epub from ebooklib.plugins import standard from ebooklib.utils import create_pagebreak if __name__ == '__main__': book = epub.EpubBook() # add metadata book.set_identifier('sample123456') book.set_title('Sample book') book.set_language('en') book.add_author('Aleksandar Erkalovic') # build the chapter HTML and add the page break c1 = epub.EpubHtml(title='Introduction', file_name='intro.xhtml', lang='en') c1.content = u'<h1>Introduction</h1><p><span id="page1" epub:type="pagebreak">1</span>This chapter has a visible page number.</p><p><span id="page2" epub:type="pagebreak">2</span>Something else now.</p>' c2 = epub.EpubHtml(title='Chapter the Second', file_name='chap02.xhtml', lang='en') c2.content = u'<html><head></head><body><h1>Chapter the Second</h1><p>This chapter has two page breaks, both with invisible page numbers.</p>' # Add invisible page numbers that match the printed text, for accessibility c2.content += create_pagebreak("2") # You can add more content after the page break c2.content += u'<p>This is the second page in the second chapter, after the invisible page break.</p>' # Add invisible page numbers that match the printed text, for accessibility c2.content += create_pagebreak("3", label="Page 3") # close the chapter c2.content += u'</body></html>' # add chapters to the book book.add_item(c1) book.add_item(c2) # create table of contents # - add manual link # - add section # - add auto created links to chapters book.toc = ((c1, c2, )) # add navigation files book.add_item(epub.EpubNcx()) book.add_item(epub.EpubNav()) # create spine book.spine = ['nav', c1, c2, ] # create epub file opts = {'plugins': [standard.SyntaxPlugin()]} epub.write_epub('test.epub', book, opts)
1,904
Python
.py
40
42.55
207
0.684582
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,970
parse.py
aerkalov_ebooklib/samples/06_parse/parse.py
import sys import ebooklib from ebooklib import epub from ebooklib.utils import debug book = epub.read_epub(sys.argv[1]) debug(book.metadata) debug(book.spine) debug(book.toc) for x in book.get_items_of_type(ebooklib.ITEM_IMAGE): debug(x) for x in book.get_items_of_type(ebooklib.ITEM_DOCUMENT): debug(x)
320
Python
.py
12
24.583333
57
0.782178
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,971
create.py
aerkalov_ebooklib/samples/02_cover_create/create.py
# coding=utf-8 from ebooklib import epub if __name__ == '__main__': book = epub.EpubBook() # add metadata book.set_identifier('sample123456') book.set_title('Sample book') book.set_language('en') book.add_author('Aleksandar Erkalovic') # add cover image book.set_cover("image.jpg", open('cover.jpg', 'rb').read()) # intro chapter c1 = epub.EpubHtml(title='Introduction', file_name='intro.xhtml', lang='hr') c1.content=u'<html><head></head><body><h1>Introduction</h1><p>Introduction paragraph where i explain what is happening.</p></body></html>' # about chapter c2 = epub.EpubHtml(title='About this book', file_name='about.xhtml') c2.content='<h1>About this book</h1><p>Helou, this is my book! There are many books, but this one is mine.</p><p><img src="image.jpg" alt="Cover Image"/></p>' # add chapters to the book book.add_item(c1) book.add_item(c2) # create table of contents # - add manual link # - add section # - add auto created links to chapters book.toc = (epub.Link('intro.xhtml', 'Introduction', 'intro'), (epub.Section('Languages'), (c1, c2)) ) # add navigation files book.add_item(epub.EpubNcx()) book.add_item(epub.EpubNav()) # define css style style = ''' @namespace epub "http://www.idpf.org/2007/ops"; body { font-family: Cambria, Liberation Serif, Bitstream Vera Serif, Georgia, Times, Times New Roman, serif; } h2 { text-align: left; text-transform: uppercase; font-weight: 200; } ol { list-style-type: none; } ol > li:first-child { margin-top: 0.3em; } nav[epub|type~='toc'] > ol > li > ol { list-style-type:square; } nav[epub|type~='toc'] > ol > li > ol > li { margin-top: 0.3em; } ''' # add css file nav_css = epub.EpubItem(uid="style_nav", file_name="style/nav.css", media_type="text/css", content=style) book.add_item(nav_css) # create spin, add cover page as first page book.spine = ['cover', 'nav', c1, c2] # create epub file epub.write_epub('test.epub', book, {})
2,160
Python
.py
62
29.532258
162
0.632012
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,972
epub2markdown.py
aerkalov_ebooklib/samples/04_markdown_parse/epub2markdown.py
#!/usr/bin/env python import os.path import subprocess import sys from ebooklib import epub # This is just a basic example which can easily break in real world. if __name__ == '__main__': # read epub book = epub.read_epub(sys.argv[1]) # get base filename from the epub base_name = os.path.basename(os.path.splitext(sys.argv[1])[0]) for item in book.items: # convert into markdown if this is html if isinstance(item, epub.EpubHtml): proc = subprocess.Popen(['pandoc', '-f', 'html', '-t', 'markdown', '-'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) content, error = proc.communicate(item.content) file_name = os.path.splitext(item.file_name)[0] + '.md' else: file_name = item.file_name content = item.content # create needed directories dir_name = '{0}/{1}'.format(base_name, os.path.dirname(file_name)) if not os.path.exists(dir_name): os.makedirs(dir_name) print('>> {0}'.format(file_name)) # write content to file with open('{0}/{1}'.format(base_name, file_name), 'w') as f: f.write(content)
1,255
Python
.py
30
32.266667
84
0.580937
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,973
create.py
aerkalov_ebooklib/samples/03_advanced_create/create.py
# coding=utf-8 from ebooklib import epub if __name__ == '__main__': book = epub.EpubBook() # add metadata book.set_identifier('sample123456') book.set_title('Sample book') book.set_language('en') book.add_author('Aleksandar Erkalovic') # intro chapter c1 = epub.EpubHtml(title='Introduction', file_name='intro.xhtml', lang='hr') c1.content=u'<html><head></head><body><h1>Introduction</h1><p>Introduction paragraph where i explain what is happening.</p></body></html>' # defube style style = '''BODY { text-align: justify;}''' default_css = epub.EpubItem(uid="style_default", file_name="style/default.css", media_type="text/css", content=style) book.add_item(default_css) # about chapter c2 = epub.EpubHtml(title='About this book', file_name='about.xhtml') c2.content='<h1>About this book</h1><p>Helou, this is my book! There are many books, but this one is mine.</p>' c2.set_language('hr') c2.properties.append('rendition:layout-pre-paginated rendition:orientation-landscape rendition:spread-none') c2.add_item(default_css) # add chapters to the book book.add_item(c1) book.add_item(c2) # create table of contents # - add manual link # - add section # - add auto created links to chapters book.toc = (epub.Link('intro.xhtml', 'Introduction', 'intro'), (epub.Section('Languages'), (c1, c2)) ) # add navigation files book.add_item(epub.EpubNcx()) book.add_item(epub.EpubNav()) # define css style style = ''' @namespace epub "http://www.idpf.org/2007/ops"; body { font-family: Cambria, Liberation Serif, Bitstream Vera Serif, Georgia, Times, Times New Roman, serif; } h2 { text-align: left; text-transform: uppercase; font-weight: 200; } ol { list-style-type: none; } ol > li:first-child { margin-top: 0.3em; } nav[epub|type~='toc'] > ol > li > ol { list-style-type:square; } nav[epub|type~='toc'] > ol > li > ol > li { margin-top: 0.3em; } ''' # add css file nav_css = epub.EpubItem(uid="style_nav", file_name="style/nav.css", media_type="text/css", content=style) book.add_item(nav_css) # create 
spine book.spine = ['nav', c1, c2] # create epub file epub.write_epub('test.epub', book, {})
2,380
Python
.py
67
30.179104
142
0.645897
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,974
create.py
aerkalov_ebooklib/samples/08_SMIL/create.py
# coding=utf-8 from ebooklib import epub if __name__ == '__main__': book = epub.EpubBook() # add metadata book.set_identifier('sample123456') book.set_title('Sample book') book.set_language('en') book.add_metadata(None, 'meta', 'Naro Narrator', {'property': 'media:narrator'}) book.add_metadata(None, 'meta', '0:10:10.500', {'property': 'media:duration'}) book.add_metadata(None, 'meta', '0:05:00.500', {'property': 'media:duration', 'refines': '#intro_overlay'}) book.add_metadata(None, 'meta', '-epub-media-overlay-active', {'property': 'media:active-class'}) book.add_author('Aleksandar Erkalovic') # intro chapter c1 = epub.EpubHtml(title='Introduction', file_name='intro.xhtml', lang='en', media_overlay='intro_overlay') c1.content=u'<html><head></head><body><section epub:type="frontmatter colophon"><h1><span id="header_1">Introduction</span></h1><p><span id="para_1">Introduction paragraph where i explain what is happening.</span></p></section></body></html>' s1 = epub.EpubSMIL(uid='intro_overlay', file_name='test.smil', content=open('test.smil', 'rt').read()) a1 = epub.EpubItem(file_name='chapter1_audio.mp3', content=open('chapter1_audio.mp3', 'rb').read(), media_type='audio/mpeg') # add chapters to the book book.add_item(c1) book.add_item(s1) book.add_item(a1) book.toc = [epub.Link('intro.xhtml', 'Introduction', 'intro')] # add navigation files book.add_item(epub.EpubNcx()) book.add_item(epub.EpubNav()) # create spine book.spine = ['nav', c1] # create epub file epub.write_epub('test.epub', book, {})
1,649
Python
.py
30
49.733333
246
0.666667
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,975
parse.py
aerkalov_ebooklib/samples/08_SMIL/parse.py
import sys import ebooklib from ebooklib import epub from ebooklib.utils import debug book = epub.read_epub(sys.argv[1]) debug(book.metadata) debug(book.spine) debug(book.toc) debug('================================') debug('SMIL') for x in book.get_items_of_type(ebooklib.ITEM_SMIL): debug(x) debug('================================') debug('DOCUMENTS') for x in book.get_items_of_type(ebooklib.ITEM_DOCUMENT): if x.is_chapter(): debug('[{}] media_overlay={}'.format(x, x.media_overlay)) debug('================================')
556
Python
.py
18
28.611111
65
0.596987
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,976
create.py
aerkalov_ebooklib/samples/01_basic_create/create.py
# coding=utf-8 from ebooklib import epub if __name__ == '__main__': book = epub.EpubBook() # add metadata book.set_identifier('sample123456') book.set_title('Sample book') book.set_language('en') book.add_author('Aleksandar Erkalovic') # intro chapter c1 = epub.EpubHtml(title='Introduction', file_name='intro.xhtml', lang='en') c1.content=u'<html><head></head><body><h1>Introduction</h1><p>Introduction paragraph where i explain what is happening.</p></body></html>' # about chapter c2 = epub.EpubHtml(title='About this book', file_name='about.xhtml') c2.content='<h1>About this book</h1><p>Helou, this is my book! There are many books, but this one is mine.</p>' # add chapters to the book book.add_item(c1) book.add_item(c2) # create table of contents # - add section # - add auto created links to chapters book.toc = (epub.Link('intro.xhtml', 'Introduction', 'intro'), (epub.Section('Languages'), (c1, c2)) ) # add navigation files book.add_item(epub.EpubNcx()) book.add_item(epub.EpubNav()) # define css style style = ''' @namespace epub "http://www.idpf.org/2007/ops"; body { font-family: Cambria, Liberation Serif, Bitstream Vera Serif, Georgia, Times, Times New Roman, serif; } h2 { text-align: left; text-transform: uppercase; font-weight: 200; } ol { list-style-type: none; } ol > li:first-child { margin-top: 0.3em; } nav[epub|type~='toc'] > ol > li > ol { list-style-type:square; } nav[epub|type~='toc'] > ol > li > ol > li { margin-top: 0.3em; } ''' # add css file nav_css = epub.EpubItem(uid="style_nav", file_name="style/nav.css", media_type="text/css", content=style) book.add_item(nav_css) # create spine book.spine = ['nav', c1, c2] # create epub file epub.write_epub('test.epub', book, {})
1,965
Python
.py
59
27.983051
142
0.632392
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,977
conf.py
aerkalov_ebooklib/docs/conf.py
# -*- coding: utf-8 -*-
#
# EbookLib documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 25 11:49:49 2014.
#
# This file is execfile()d by Sphinx with the current directory set to its
# containing dir, so every assignment below becomes a Sphinx config value.
#
# Not all possible configuration values are present in this autogenerated
# file.  All configuration values have a default; values that are commented
# out serve to show the default.

import sys, os

# Make the ebooklib package importable so sphinx.ext.autodoc can document it.
# The path is relative to this file (docs/), hence '..' is the project root.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Sphinx extension modules used by this documentation:
#   - autodoc / viewcode: pull API docs straight from the ebooklib sources
#   - doctest / coverage: check examples and documentation coverage
#   - intersphinx:        cross-link into the Python standard library docs
#   - todo:               support ".. todo::" directives
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'EbookLib'
copyright = u'2014, Aleksandar Erkalovic'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# NOTE(review): 0.17 looks out of date versus ebooklib.VERSION — confirm and
# keep these in sync when releasing.
#
# The short X.Y version.
version = '0.17'
# The full version, including alpha/beta/rc tags.
release = '0.17'

# The language for content autogenerated by Sphinx.
#language = None

# |today| substitution: either set an explicit value, or a strftime format.
#today = ''
#today_fmt = '%B %d, %Y'

# Patterns, relative to source directory, that match files and directories
# to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.
html_theme = 'default'

# Theme-specific options; see the theme documentation for what is available.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# Favicon for the docs: a Windows icon file (.ico), 16x16 or 32x32 pixels.
#html_favicon = None

# Custom static files (such as style sheets), relative to this directory.
# They are copied after the builtin static files, so a file named
# "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'EbookLibdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'EbookLib.tex', u'EbookLib Documentation',
   u'Aleksandar Erkalovic', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ebooklib', u'EbookLib Documentation',
     [u'Aleksandar Erkalovic'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'EbookLib', u'EbookLib Documentation',
   u'Aleksandar Erkalovic', 'EbookLib', 'Python library for EPUB and Kindle formats.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'EbookLib'
epub_author = u'Aleksandar Erkalovic'
epub_publisher = u'Aleksandar Erkalovic'
epub_copyright = u'2014, Aleksandar Erkalovic'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before/after the pages created by sphinx.
# The format is a list of (path, title) tuples.
#epub_pre_files = []
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
9,313
Python
.py
209
43.07177
148
0.729167
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,978
utils.py
aerkalov_ebooklib/ebooklib/utils.py
# This file is part of EbookLib.
# Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com>
#
# EbookLib is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EbookLib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with EbookLib. If not, see <http://www.gnu.org/licenses/>.

import io
import mimetypes

from lxml import etree

# One-shot flag: mimetypes.init()/add_type() only need to run once per process.
mimetype_initialised = False


def debug(obj):
    """Pretty-print *obj* to stdout (development helper)."""
    import pprint

    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(obj)


def parse_string(s):
    """Parse an XML document supplied as text or bytes.

    Entity resolution is disabled (XXE hardening) and the parser is set to
    recover from minor markup errors.

    :Args:
      - s: XML document as a unicode string or as already-encoded bytes.

    :Returns:
      lxml ElementTree of the parsed document.
    """
    parser = etree.XMLParser(recover=True, resolve_entities=False)
    # Explicit type test replaces the previous bare "except:" fallback, which
    # could mask genuine parse errors (and even trap KeyboardInterrupt).
    data = s if isinstance(s, bytes) else s.encode('utf-8')
    return etree.parse(io.BytesIO(data), parser=parser)


def parse_html_string(s):
    """Parse an HTML document (bytes are decoded as UTF-8) into an lxml tree."""
    from lxml import html

    utf8_parser = html.HTMLParser(encoding='utf-8')
    html_tree = html.document_fromstring(s, parser=utf8_parser)
    return html_tree


def guess_type(extenstion):
    """Return ``mimetypes.guess_type`` for a file name/URL.

    Ensures the mimetypes registry is initialised and that ``.xhtml`` maps to
    ``application/xhtml+xml`` (not registered by default on all platforms).

    NOTE(review): the parameter name is misspelled ("extenstion"); it is kept
    unchanged for backward compatibility with keyword-argument callers.
    """
    global mimetype_initialised

    if not mimetype_initialised:
        mimetypes.init()
        mimetypes.add_type('application/xhtml+xml', '.xhtml')
        mimetype_initialised = True

    return mimetypes.guess_type(extenstion)


def create_pagebreak(pageref, label=None, html=True):
    """Create an EPUB 3 pagebreak marker element.

    :Args:
      - pageref: value used for both the ``title`` and ``id`` attributes
      - label: optional visible text for the marker
      - html: when True return serialized markup, otherwise the lxml element

    :Returns:
      Serialized ``<span epub:type="pagebreak">`` markup as a string, or the
      lxml element itself when ``html`` is False.
    """
    # Imported lazily to avoid a circular import (epub.py imports this module).
    from ebooklib.epub import NAMESPACES

    pageref_attributes = {
        '{%s}type' % NAMESPACES['EPUB']: 'pagebreak',
        'title': u'{}'.format(pageref),
        'id': u'{}'.format(pageref),
    }

    pageref_elem = etree.Element('span', pageref_attributes,
                                 nsmap={'epub': NAMESPACES['EPUB']})

    if label:
        pageref_elem.text = label

    if html:
        return etree.tostring(pageref_elem, encoding='unicode')

    return pageref_elem


def get_headers(elem):
    """Return the text of the first non-empty h1..h6 child of *elem*, or None."""
    for n in range(1, 7):
        headers = elem.xpath('./h{}'.format(n))

        if len(headers) > 0:
            text = headers[0].text_content().strip()
            if len(text) > 0:
                return text
    return None


def get_pages(item):
    """Collect EPUB page markers from a document item.

    Scans the document body for elements carrying an ``epub:type`` attribute
    and an ``id``, deriving a human-readable label from (in order of
    preference) the element text, its ``aria-label``, a heading child, or
    the ``id`` itself.

    :Returns:
      List of ``(file_name, element_id, label)`` tuples.
    """
    body = parse_html_string(item.get_body_content())
    pages = []

    for elem in body.iter():
        if 'epub:type' in elem.attrib and elem.get('id') is not None:
            _text = None

            if elem.text is not None and elem.text.strip() != '':
                _text = elem.text.strip()

            if _text is None:
                _text = elem.get('aria-label')

            if _text is None:
                _text = get_headers(elem)

            pages.append((item.get_name(), elem.get('id'), _text or elem.get('id')))

    return pages


def get_pages_for_items(items):
    """Flatten :func:`get_pages` results for a sequence of document items."""
    # Single comprehension instead of building an intermediate list-of-lists.
    return [page for item in items for page in get_pages(item)]
3,206
Python
.py
81
32.790123
96
0.659498
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,979
__init__.py
aerkalov_ebooklib/ebooklib/__init__.py
# This file is part of EbookLib. # Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com> # # EbookLib is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # EbookLib is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with EbookLib. If not, see <http://www.gnu.org/licenses/>. # Version of ebook library VERSION = (0, 18, 1) # LIST OF POSSIBLE ITEMS ITEM_UNKNOWN = 0 ITEM_IMAGE = 1 ITEM_STYLE = 2 ITEM_SCRIPT = 3 ITEM_NAVIGATION = 4 ITEM_VECTOR = 5 ITEM_FONT = 6 ITEM_VIDEO = 7 ITEM_AUDIO = 8 ITEM_DOCUMENT = 9 ITEM_COVER = 10 ITEM_SMIL = 11 # EXTENSION MAPPER EXTENSIONS = {ITEM_IMAGE: ['.jpg', '.jpeg', '.gif', '.tiff', '.tif', '.png'], ITEM_STYLE: ['.css'], ITEM_VECTOR: ['.svg'], ITEM_FONT: ['.otf', '.woff', '.ttf'], ITEM_SCRIPT: ['.js'], ITEM_NAVIGATION: ['.ncx'], ITEM_VIDEO: ['.mov', '.mp4', '.avi'], ITEM_AUDIO: ['.mp3', '.ogg'], ITEM_COVER: ['.jpg', '.jpeg', '.png'], ITEM_SMIL: ['.smil'] }
1,509
Python
.py
42
31.5
77
0.643199
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,980
epub.py
aerkalov_ebooklib/ebooklib/epub.py
# This file is part of EbookLib. # Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com> # # EbookLib is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # EbookLib is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with EbookLib. If not, see <http://www.gnu.org/licenses/>. import zipfile import six import sys import logging import uuid import warnings import posixpath as zip_path import os.path from collections import OrderedDict try: from urllib.parse import unquote except ImportError: from urllib import unquote from lxml import etree import ebooklib from ebooklib.utils import parse_string, parse_html_string, guess_type, get_pages_for_items # Version of EPUB library VERSION = (0, 18, 1) NAMESPACES = {'XML': 'http://www.w3.org/XML/1998/namespace', 'EPUB': 'http://www.idpf.org/2007/ops', 'DAISY': 'http://www.daisy.org/z3986/2005/ncx/', 'OPF': 'http://www.idpf.org/2007/opf', 'CONTAINERNS': 'urn:oasis:names:tc:opendocument:xmlns:container', 'DC': 'http://purl.org/dc/elements/1.1/', 'XHTML': 'http://www.w3.org/1999/xhtml'} # XML Templates CONTAINER_PATH = 'META-INF/container.xml' CONTAINER_XML = '''<?xml version="1.0" encoding="utf-8"?> <container xmlns="urn:oasis:names:tc:opendocument:xmlns:container" version="1.0"> <rootfiles> <rootfile media-type="application/oebps-package+xml" full-path="%(folder_name)s/content.opf"/> </rootfiles> </container> ''' NCX_XML = six.b('''<!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN" "http://www.daisy.org/z3986/2005/ncx-2005-1.dtd"> <ncx 
xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1" />''') NAV_XML = six.b('''<?xml version="1.0" encoding="utf-8"?><!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops"/>''') CHAPTER_XML = six.b('''<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" epub:prefix="z3998: http://www.daisy.org/z3998/2012/vocab/structure/#"></html>''') COVER_XML = six.b('''<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html> <html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" lang="en" xml:lang="en"> <head> <style> body { margin: 0em; padding: 0em; } img { max-width: 100%; max-height: 100%; } </style> </head> <body> <img src="" alt="" /> </body> </html>''') IMAGE_MEDIA_TYPES = ['image/jpeg', 'image/jpg', 'image/png', 'image/svg+xml'] # TOC and navigation elements class Section(object): def __init__(self, title, href=''): self.title = title self.href = href class Link(object): def __init__(self, href, title, uid=None): self.href = href self.title = title self.uid = uid # Exceptions class EpubException(Exception): def __init__(self, code, msg): self.code = code self.msg = msg def __str__(self): return repr(self.msg) # Items class EpubItem(object): """ Base class for the items in a book. """ def __init__(self, uid=None, file_name='', media_type='', content=six.b(''), manifest=True): """ :Args: - uid: Unique identifier for this item (optional) - file_name: File name for this item (optional) - media_type: Media type for this item (optional) - content: Content for this item (optional) - manifest: Manifest for this item (optional) """ self.id = uid self.file_name = file_name self.media_type = media_type self.content = content self.is_linear = True self.manifest = manifest self.book = None def get_id(self): """ Returns unique identifier for this item. :Returns: Returns uid number as string. 
""" return self.id def get_name(self): """ Returns name for this item. By default it is always file name but it does not have to be. :Returns: Returns file name for this item. """ return self.file_name def get_type(self): """ Guess type according to the file extension. Might not be the best way how to do it, but it works for now. Items can be of type: - ITEM_UNKNOWN = 0 - ITEM_IMAGE = 1 - ITEM_STYLE = 2 - ITEM_SCRIPT = 3 - ITEM_NAVIGATION = 4 - ITEM_VECTOR = 5 - ITEM_FONT = 6 - ITEM_VIDEO = 7 - ITEM_AUDIO = 8 - ITEM_DOCUMENT = 9 - ITEM_COVER = 10 We map type according to the extensions which are defined in ebooklib.EXTENSIONS. :Returns: Returns type of the item as number. """ _, ext = zip_path.splitext(self.get_name()) ext = ext.lower() for uid, ext_list in six.iteritems(ebooklib.EXTENSIONS): if ext in ext_list: return uid return ebooklib.ITEM_UNKNOWN def get_content(self, default=six.b('')): """ Returns content of the item. Content should be of type 'str' (Python 2) or 'bytes' (Python 3) :Args: - default: Default value for the content if it is not already defined. :Returns: Returns content of the item. """ return self.content or default def set_content(self, content): """ Sets content value for this item. :Args: - content: Content value """ self.content = content def __str__(self): return '<EpubItem:%s>' % self.id class EpubNcx(EpubItem): "Represents Navigation Control File (NCX) in the EPUB." def __init__(self, uid='ncx', file_name='toc.ncx'): super(EpubNcx, self).__init__(uid=uid, file_name=file_name, media_type='application/x-dtbncx+xml') def __str__(self): return '<EpubNcx:%s>' % self.id class EpubCover(EpubItem): """ Represents Cover image in the EPUB file. 
""" def __init__(self, uid='cover-img', file_name=''): super(EpubCover, self).__init__(uid=uid, file_name=file_name) def get_type(self): return ebooklib.ITEM_COVER def __str__(self): return '<EpubCover:%s:%s>' % (self.id, self.file_name) class EpubHtml(EpubItem): """ Represents HTML document in the EPUB file. """ _template_name = 'chapter' def __init__(self, uid=None, file_name='', media_type='', content=None, title='', lang=None, direction=None, media_overlay=None, media_duration=None): super(EpubHtml, self).__init__(uid, file_name, media_type, content) self.title = title self.lang = lang self.direction = direction self.media_overlay = media_overlay self.media_duration = media_duration self.links = [] self.properties = [] self.pages = [] def is_chapter(self): """ Returns if this document is chapter or not. :Returns: Returns book value. """ return True def get_type(self): """ Always returns ebooklib.ITEM_DOCUMENT as type of this document. :Returns: Always returns ebooklib.ITEM_DOCUMENT """ return ebooklib.ITEM_DOCUMENT def set_language(self, lang): """ Sets language for this book item. By default it will use language of the book but it can be overwritten with this call. """ self.lang = lang def get_language(self): """ Get language code for this book item. Language of the book item can be different from the language settings defined globaly for book. :Returns: As string returns language code. """ return self.lang def add_link(self, **kwgs): """ Add additional link to the document. Links will be embeded only inside of this document. >>> add_link(href='styles.css', rel='stylesheet', type='text/css') """ self.links.append(kwgs) if kwgs.get('type') == 'text/javascript': if 'scripted' not in self.properties: self.properties.append('scripted') def get_links(self): """ Returns list of additional links defined for this document. :Returns: As tuple return list of links. 
""" return (link for link in self.links) def get_links_of_type(self, link_type): """ Returns list of additional links of specific type. :Returns: As tuple returns list of links. """ return (link for link in self.links if link.get('type', '') == link_type) def add_item(self, item): """ Add other item to this document. It will create additional links according to the item type. :Args: - item: item we want to add defined as instance of EpubItem """ if item.get_type() == ebooklib.ITEM_STYLE: self.add_link(href=item.get_name(), rel='stylesheet', type='text/css') if item.get_type() == ebooklib.ITEM_SCRIPT: self.add_link(src=item.get_name(), type='text/javascript') def get_body_content(self): """ Returns content of BODY element for this HTML document. Content will be of type 'str' (Python 2) or 'bytes' (Python 3). :Returns: Returns content of this document. """ try: html_tree = parse_html_string(self.content) except: return '' html_root = html_tree.getroottree() if len(html_root.find('body')) != 0: body = html_tree.find('body') tree_str = etree.tostring(body, pretty_print=True, encoding='utf-8', xml_declaration=False) # this is so stupid if tree_str.startswith(six.b('<body>')): n = tree_str.rindex(six.b('</body>')) return tree_str[6:n] return tree_str return '' def get_content(self, default=None): """ Returns content for this document as HTML string. Content will be of type 'str' (Python 2) or 'bytes' (Python 3). :Args: - default: Default value for the content if it is not defined. :Returns: Returns content of this document. 
""" tree = parse_string(self.book.get_template(self._template_name)) tree_root = tree.getroot() tree_root.set('lang', self.lang or self.book.language) tree_root.attrib['{%s}lang' % NAMESPACES['XML']] = self.lang or self.book.language # add to the head also # <meta charset="utf-8" /> try: html_tree = parse_html_string(self.content) except: return '' html_root = html_tree.getroottree() # create and populate head _head = etree.SubElement(tree_root, 'head') if self.title != '': _title = etree.SubElement(_head, 'title') _title.text = self.title for lnk in self.links: if lnk.get('type') == 'text/javascript': _lnk = etree.SubElement(_head, 'script', lnk) # force <script></script> _lnk.text = '' else: _lnk = etree.SubElement(_head, 'link', lnk) # this should not be like this # head = html_root.find('head') # if head is not None: # for i in head.getchildren(): # if i.tag == 'title' and self.title != '': # continue # _head.append(i) # create and populate body _body = etree.SubElement(tree_root, 'body') if self.direction: _body.set('dir', self.direction) tree_root.set('dir', self.direction) body = html_tree.find('body') if body is not None: for i in body.getchildren(): _body.append(i) tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True) return tree_str def __str__(self): return '<EpubHtml:%s:%s>' % (self.id, self.file_name) class EpubCoverHtml(EpubHtml): """ Represents Cover page in the EPUB file. """ def __init__(self, uid='cover', file_name='cover.xhtml', image_name='', title='Cover'): super(EpubCoverHtml, self).__init__(uid=uid, file_name=file_name, title=title) self.image_name = image_name self.is_linear = False def is_chapter(self): """ Returns if this document is chapter or not. :Returns: Returns book value. """ return False def get_content(self): """ Returns content for cover page as HTML string. Content will be of type 'str' (Python 2) or 'bytes' (Python 3). :Returns: Returns content of this document. 
""" self.content = self.book.get_template('cover') tree = parse_string(super(EpubCoverHtml, self).get_content()) tree_root = tree.getroot() images = tree_root.xpath('//xhtml:img', namespaces={'xhtml': NAMESPACES['XHTML']}) images[0].set('src', self.image_name) images[0].set('alt', self.title) tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True) return tree_str def __str__(self): return '<EpubCoverHtml:%s:%s>' % (self.id, self.file_name) class EpubNav(EpubHtml): """ Represents Navigation Document in the EPUB file. """ def __init__(self, uid='nav', file_name='nav.xhtml', media_type='application/xhtml+xml', title='', direction=None): super(EpubNav, self).__init__(uid=uid, file_name=file_name, media_type=media_type, title=title, direction=direction) def is_chapter(self): """ Returns if this document is chapter or not. :Returns: Returns book value. """ return False def __str__(self): return '<EpubNav:%s:%s>' % (self.id, self.file_name) class EpubImage(EpubItem): """ Represents Image in the EPUB file. 
""" def __init__(self, *args, **kwargs): super(EpubImage, self).__init__(*args, **kwargs) def get_type(self): return ebooklib.ITEM_IMAGE def __str__(self): return '<EpubImage:%s:%s>' % (self.id, self.file_name) class EpubSMIL(EpubItem): def __init__(self, uid=None, file_name='', content=None): super(EpubSMIL, self).__init__(uid=uid, file_name=file_name, media_type='application/smil+xml', content=content) def get_type(self): return ebooklib.ITEM_SMIL def __str__(self): return '<EpubSMIL:%s:%s>' % (self.id, self.file_name) # EpubBook class EpubBook(object): def __init__(self): self.EPUB_VERSION = None self.reset() # we should have options here def reset(self): "Initialises all needed variables to default values" self.metadata = {} self.items = [] self.spine = [] self.guide = [] self.pages = [] self.toc = [] self.bindings = [] self.IDENTIFIER_ID = 'id' self.FOLDER_NAME = 'EPUB' self._id_html = 0 self._id_image = 0 self._id_static = 0 self.title = '' self.language = 'en' self.direction = None self.templates = { 'ncx': NCX_XML, 'nav': NAV_XML, 'chapter': CHAPTER_XML, 'cover': COVER_XML } self.add_metadata('OPF', 'generator', '', { 'name': 'generator', 'content': 'Ebook-lib %s' % '.'.join([str(s) for s in VERSION]) }) # default to using a randomly-unique identifier if one is not specified manually self.set_identifier(str(uuid.uuid4())) # custom prefixes and namespaces to be set to the content.opf doc self.prefixes = [] self.namespaces = {} def set_identifier(self, uid): """ Sets unique id for this epub :Args: - uid: Value of unique identifier for this book """ self.uid = uid self.set_unique_metadata('DC', 'identifier', self.uid, {'id': self.IDENTIFIER_ID}) def set_title(self, title): """ Set title. You can set multiple titles. :Args: - title: Title value """ self.title = title self.add_metadata('DC', 'title', self.title) def set_language(self, lang): """ Set language for this epub. You can set multiple languages. 
Specific items in the book can have different language settings. :Args: - lang: Language code """ self.language = lang self.add_metadata('DC', 'language', lang) def set_direction(self, direction): """ :Args: - direction: Options are "ltr", "rtl" and "default" """ self.direction = direction def set_cover(self, file_name, content, create_page=True): """ Set cover and create cover document if needed. :Args: - file_name: file name of the cover page - content: Content for the cover image - create_page: Should cover page be defined. Defined as bool value (optional). Default value is True. """ # as it is now, it can only be called once c0 = EpubCover(file_name=file_name) c0.content = content self.add_item(c0) if create_page: c1 = EpubCoverHtml(image_name=file_name) self.add_item(c1) self.add_metadata(None, 'meta', '', OrderedDict([('name', 'cover'), ('content', 'cover-img')])) def add_author(self, author, file_as=None, role=None, uid='creator'): "Add author for this document" self.add_metadata('DC', 'creator', author, {'id': uid}) if file_as: self.add_metadata(None, 'meta', file_as, {'refines': '#' + uid, 'property': 'file-as', 'scheme': 'marc:relators'}) if role: self.add_metadata(None, 'meta', role, {'refines': '#' + uid, 'property': 'role', 'scheme': 'marc:relators'}) def add_metadata(self, namespace, name, value, others=None): "Add metadata" if namespace in NAMESPACES: namespace = NAMESPACES[namespace] if namespace not in self.metadata: self.metadata[namespace] = {} if name not in self.metadata[namespace]: self.metadata[namespace][name] = [] self.metadata[namespace][name].append((value, others)) def get_metadata(self, namespace, name): "Retrieve metadata" if namespace in NAMESPACES: namespace = NAMESPACES[namespace] return self.metadata[namespace].get(name, []) def set_unique_metadata(self, namespace, name, value, others=None): "Add metadata if metadata with this identifier does not already exist, otherwise update existing metadata." 
if namespace in NAMESPACES: namespace = NAMESPACES[namespace] if namespace in self.metadata and name in self.metadata[namespace]: self.metadata[namespace][name] = [(value, others)] else: self.add_metadata(namespace, name, value, others) def add_item(self, item): """ Add additional item to the book. If not defined, media type and chapter id will be defined for the item. :Args: - item: Item instance """ if item.media_type == '': (has_guessed, media_type) = guess_type(item.get_name().lower()) if has_guessed: if media_type is not None: item.media_type = media_type else: item.media_type = has_guessed else: item.media_type = 'application/octet-stream' if not item.get_id(): # make chapter_, image_ and static_ configurable if isinstance(item, EpubHtml): item.id = 'chapter_%d' % self._id_html self._id_html += 1 # If there's a page list, append it to the book's page list self.pages += item.pages elif isinstance(item, EpubImage): item.id = 'image_%d' % self._id_image self._id_image += 1 else: item.id = 'static_%d' % self._id_static self._id_static += 1 item.book = self self.items.append(item) return item def get_item_with_id(self, uid): """ Returns item for defined UID. >>> book.get_item_with_id('image_001') :Args: - uid: UID for the item :Returns: Returns item object. Returns None if nothing was found. """ for item in self.get_items(): if item.id == uid: return item return None def get_item_with_href(self, href): """ Returns item for defined HREF. >>> book.get_item_with_href('EPUB/document.xhtml') :Args: - href: HREF for the item we are searching for :Returns: Returns item object. Returns None if nothing was found. """ for item in self.get_items(): if item.get_name() == href: return item return None def get_items(self): """ Returns all items attached to this book. :Returns: Returns all items as tuple. """ return (item for item in self.items) def get_items_of_type(self, item_type): """ Returns all items of specified type. 
>>> book.get_items_of_type(epub.ITEM_IMAGE) :Args: - item_type: Type for items we are searching for :Returns: Returns found items as tuple. """ return (item for item in self.items if item.get_type() == item_type) def get_items_of_media_type(self, media_type): """ Returns all items of specified media type. :Args: - media_type: Media type for items we are searching for :Returns: Returns found items as tuple. """ return (item for item in self.items if item.media_type == media_type) def set_template(self, name, value): """ Defines templates which are used to generate certain types of pages. When defining new value for the template we have to use content of type 'str' (Python 2) or 'bytes' (Python 3). At the moment we use these templates: - ncx - nav - chapter - cover :Args: - name: Name for the template - value: Content for the template """ self.templates[name] = value def get_template(self, name): """ Returns value for the template. :Args: - name: template name :Returns: Value of the template. 
""" return self.templates.get(name) def add_prefix(self, name, uri): """ Appends custom prefix to be added to the content.opf document >>> epub_book.add_prefix('bkterms', 'http://booktype.org/') :Args: - name: namespave name - uri: URI for the namespace """ self.prefixes.append('%s: %s' % (name, uri)) class EpubWriter(object): DEFAULT_OPTIONS = { 'epub2_guide': True, 'epub3_landmark': True, 'epub3_pages': True, 'landmark_title': 'Guide', 'pages_title': 'Pages', 'spine_direction': True, 'package_direction': False, 'play_order': { 'enabled': False, 'start_from': 1 }, 'raise_exceptions': False, 'compresslevel': 6 } def __init__(self, name, book, options=None): self.file_name = name self.book = book self.options = dict(self.DEFAULT_OPTIONS) if options: self.options.update(options) self._init_play_order() def _init_play_order(self): self._play_order = { 'enabled': False, 'start_from': 1 } try: self._play_order['enabled'] = self.options['play_order']['enabled'] self._play_order['start_from'] = self.options['play_order']['start_from'] except KeyError: pass def process(self): # should cache this html parsing so we don't do it for every plugin for plg in self.options.get('plugins', []): if hasattr(plg, 'before_write'): plg.before_write(self.book) for item in self.book.get_items(): if isinstance(item, EpubHtml): for plg in self.options.get('plugins', []): if hasattr(plg, 'html_before_write'): plg.html_before_write(self.book, item) def _write_container(self): container_xml = CONTAINER_XML % {'folder_name': self.book.FOLDER_NAME} self.out.writestr(CONTAINER_PATH, container_xml) def _write_opf_metadata(self, root): # This is really not needed # problem is uppercase/lowercase # for ns_name, values in six.iteritems(self.book.metadata): # if ns_name: # for n_id, ns_url in six.iteritems(NAMESPACES): # if ns_name == ns_url: # nsmap[n_id.lower()] = NAMESPACES[n_id] nsmap = {'dc': NAMESPACES['DC'], 'opf': NAMESPACES['OPF']} nsmap.update(self.book.namespaces) metadata = 
etree.SubElement(root, 'metadata', nsmap=nsmap) el = etree.SubElement(metadata, 'meta', {'property': 'dcterms:modified'}) if 'mtime' in self.options: mtime = self.options['mtime'] else: import datetime mtime = datetime.datetime.now() el.text = mtime.strftime('%Y-%m-%dT%H:%M:%SZ') for ns_name, values in six.iteritems(self.book.metadata): if ns_name == NAMESPACES['OPF']: for values in values.values(): for v in values: if 'property' in v[1] and v[1]['property'] == 'dcterms:modified': continue try: el = etree.SubElement(metadata, 'meta', v[1]) if v[0]: el.text = v[0] except ValueError: logging.error('Could not create metadata.') else: for name, values in six.iteritems(values): for v in values: try: if ns_name: el = etree.SubElement(metadata, '{%s}%s' % (ns_name, name), v[1]) else: el = etree.SubElement(metadata, '%s' % name, v[1]) el.text = v[0] except ValueError: logging.info('Could not create metadata "{}".'.format(name)) def _write_opf_manifest(self, root): manifest = etree.SubElement(root, 'manifest') _ncx_id = None # mathml, scripted, svg, remote-resources, and switch # nav # cover-image for item in self.book.get_items(): if not item.manifest: continue if isinstance(item, EpubNav): etree.SubElement(manifest, 'item', {'href': item.get_name(), 'id': item.id, 'media-type': item.media_type, 'properties': 'nav'}) elif isinstance(item, EpubNcx): _ncx_id = item.id etree.SubElement(manifest, 'item', {'href': item.file_name, 'id': item.id, 'media-type': item.media_type}) elif isinstance(item, EpubCover): etree.SubElement(manifest, 'item', {'href': item.file_name, 'id': item.id, 'media-type': item.media_type, 'properties': 'cover-image'}) else: opts = {'href': item.file_name, 'id': item.id, 'media-type': item.media_type} if hasattr(item, 'properties') and len(item.properties) > 0: opts['properties'] = ' '.join(item.properties) if hasattr(item, 'media_overlay') and item.media_overlay is not None: opts['media-overlay'] = item.media_overlay if hasattr(item, 'media_duration') 
and item.media_duration is not None: opts['duration'] = item.media_duration etree.SubElement(manifest, 'item', opts) return _ncx_id def _write_opf_spine(self, root, ncx_id): spine_attributes = {'toc': ncx_id or 'ncx'} if self.book.direction and self.options['spine_direction']: spine_attributes['page-progression-direction'] = self.book.direction spine = etree.SubElement(root, 'spine', spine_attributes) for _item in self.book.spine: # this is for now # later we should be able to fetch things from tuple is_linear = True if isinstance(_item, tuple): item = _item[0] if len(_item) > 1: if _item[1] == 'no': is_linear = False else: item = _item if isinstance(item, EpubHtml): opts = {'idref': item.get_id()} if not item.is_linear or not is_linear: opts['linear'] = 'no' elif isinstance(item, EpubItem): opts = {'idref': item.get_id()} if not item.is_linear or not is_linear: opts['linear'] = 'no' else: opts = {'idref': item} try: itm = self.book.get_item_with_id(item) if not itm.is_linear or not is_linear: opts['linear'] = 'no' except: pass etree.SubElement(spine, 'itemref', opts) def _write_opf_guide(self, root): # - http://www.idpf.org/epub/20/spec/OPF_2.0.1_draft.htm#Section2.6 if len(self.book.guide) > 0 and self.options.get('epub2_guide'): guide = etree.SubElement(root, 'guide', {}) for item in self.book.guide: if 'item' in item: chap = item.get('item') if chap: _href = chap.file_name _title = chap.title else: _href = item.get('href', '') _title = item.get('title', '') if _title is None: _title = '' ref = etree.SubElement(guide, 'reference', {'type': item.get('type', ''), 'title': _title, 'href': _href}) def _write_opf_bindings(self, root): if len(self.book.bindings) > 0: bindings = etree.SubElement(root, 'bindings', {}) for item in self.book.bindings: etree.SubElement(bindings, 'mediaType', item) def _write_opf_file(self, root): tree_str = etree.tostring(root, pretty_print=True, encoding='utf-8', xml_declaration=True) self.out.writestr('%s/content.opf' % 
self.book.FOLDER_NAME, tree_str) def _write_opf(self): package_attributes = {'xmlns': NAMESPACES['OPF'], 'unique-identifier': self.book.IDENTIFIER_ID, 'version': '3.0'} if self.book.direction and self.options['package_direction']: package_attributes['dir'] = self.book.direction root = etree.Element('package', package_attributes) prefixes = ['rendition: http://www.idpf.org/vocab/rendition/#'] + self.book.prefixes root.attrib['prefix'] = ' '.join(prefixes) # METADATA self._write_opf_metadata(root) # MANIFEST _ncx_id = self._write_opf_manifest(root) # SPINE self._write_opf_spine(root, _ncx_id) # GUIDE self._write_opf_guide(root) # BINDINGS self._write_opf_bindings(root) # WRITE FILE self._write_opf_file(root) def _get_nav(self, item): # just a basic navigation for now nav_xml = parse_string(self.book.get_template('nav')) root = nav_xml.getroot() root.set('lang', self.book.language) root.attrib['{%s}lang' % NAMESPACES['XML']] = self.book.language nav_dir_name = os.path.dirname(item.file_name) head = etree.SubElement(root, 'head') title = etree.SubElement(head, 'title') title.text = item.title or self.book.title # for now this just handles css files and ignores others for _link in item.links: _lnk = etree.SubElement(head, 'link', { 'href': _link.get('href', ''), 'rel': 'stylesheet', 'type': 'text/css' }) body = etree.SubElement(root, 'body') if item.direction: body.set('dir', item.direction) nav = etree.SubElement(body, 'nav', { '{%s}type' % NAMESPACES['EPUB']: 'toc', 'id': 'id', 'role': 'doc-toc', }) content_title = etree.SubElement(nav, 'h2') content_title.text = item.title or self.book.title def _create_section(itm, items): ol = etree.SubElement(itm, 'ol') for item in items: if isinstance(item, tuple) or isinstance(item, list): li = etree.SubElement(ol, 'li') if isinstance(item[0], EpubHtml): a = etree.SubElement(li, 'a', {'href': zip_path.relpath(item[0].file_name, nav_dir_name)}) elif isinstance(item[0], Section) and item[0].href != '': a = etree.SubElement(li, 
'a', {'href': zip_path.relpath(item[0].href, nav_dir_name)}) elif isinstance(item[0], Link): a = etree.SubElement(li, 'a', {'href': zip_path.relpath(item[0].href, nav_dir_name)}) else: a = etree.SubElement(li, 'span') a.text = item[0].title _create_section(li, item[1]) elif isinstance(item, Link): li = etree.SubElement(ol, 'li') a = etree.SubElement(li, 'a', {'href': zip_path.relpath(item.href, nav_dir_name)}) a.text = item.title elif isinstance(item, EpubHtml): li = etree.SubElement(ol, 'li') a = etree.SubElement(li, 'a', {'href': zip_path.relpath(item.file_name, nav_dir_name)}) a.text = item.title _create_section(nav, self.book.toc) # LANDMARKS / GUIDE # - http://www.idpf.org/epub/30/spec/epub30-contentdocs.html#sec-xhtml-nav-def-types-landmarks if len(self.book.guide) > 0 and self.options.get('epub3_landmark'): # Epub2 guide types do not map completely to epub3 landmark types. guide_to_landscape_map = { 'notes': 'rearnotes', 'text': 'bodymatter' } guide_nav = etree.SubElement(body, 'nav', {'{%s}type' % NAMESPACES['EPUB']: 'landmarks'}) guide_content_title = etree.SubElement(guide_nav, 'h2') guide_content_title.text = self.options.get('landmark_title', 'Guide') guild_ol = etree.SubElement(guide_nav, 'ol') for elem in self.book.guide: li_item = etree.SubElement(guild_ol, 'li') if 'item' in elem: chap = elem.get('item', None) if chap: _href = chap.file_name _title = chap.title else: _href = elem.get('href', '') _title = elem.get('title', '') guide_type = elem.get('type', '') a_item = etree.SubElement(li_item, 'a', { '{%s}type' % NAMESPACES['EPUB']: guide_to_landscape_map.get(guide_type, guide_type), 'href': zip_path.relpath(_href, nav_dir_name) }) a_item.text = _title # PAGE-LIST if self.options.get('epub3_pages'): inserted_pages = get_pages_for_items([item for item in self.book.get_items_of_type(ebooklib.ITEM_DOCUMENT) \ if not isinstance(item, EpubNav)]) if len(inserted_pages) > 0: pagelist_nav = etree.SubElement( body, 'nav', { '{%s}type' % NAMESPACES['EPUB']: 
'page-list', 'id': 'pages', 'hidden': 'hidden', } ) pagelist_content_title = etree.SubElement(pagelist_nav, 'h2') pagelist_content_title.text = self.options.get( 'pages_title', 'Pages' ) pages_ol = etree.SubElement(pagelist_nav, 'ol') for filename, pageref, label in inserted_pages: li_item = etree.SubElement(pages_ol, 'li') _href = u'{}#{}'.format(filename, pageref) _title = label a_item = etree.SubElement(li_item, 'a', { 'href': zip_path.relpath(_href, nav_dir_name), }) a_item.text = _title tree_str = etree.tostring(nav_xml, pretty_print=True, encoding='utf-8', xml_declaration=True) return tree_str def _get_ncx(self): # we should be able to setup language for NCX as also ncx = parse_string(self.book.get_template('ncx')) root = ncx.getroot() head = etree.SubElement(root, 'head') # get this id uid = etree.SubElement(head, 'meta', {'content': self.book.uid, 'name': 'dtb:uid'}) uid = etree.SubElement(head, 'meta', {'content': '0', 'name': 'dtb:depth'}) uid = etree.SubElement(head, 'meta', {'content': '0', 'name': 'dtb:totalPageCount'}) uid = etree.SubElement(head, 'meta', {'content': '0', 'name': 'dtb:maxPageNumber'}) doc_title = etree.SubElement(root, 'docTitle') title = etree.SubElement(doc_title, 'text') title.text = self.book.title # doc_author = etree.SubElement(root, 'docAuthor') # author = etree.SubElement(doc_author, 'text') # author.text = 'Name of the person' # For now just make a very simple navMap nav_map = etree.SubElement(root, 'navMap') def _add_play_order(nav_point): nav_point.set('playOrder', str(self._play_order['start_from'])) self._play_order['start_from'] += 1 def _create_section(itm, items, uid): for item in items: if isinstance(item, tuple) or isinstance(item, list): section, subsection = item[0], item[1] np = etree.SubElement(itm, 'navPoint', { 'id': section.get_id() if isinstance(section, EpubHtml) else 'sep_%d' % uid }) if self._play_order['enabled']: _add_play_order(np) nl = etree.SubElement(np, 'navLabel') nt = etree.SubElement(nl, 'text') 
nt.text = section.title # CAN NOT HAVE EMPTY SRC HERE href = '' if isinstance(section, EpubHtml): href = section.file_name elif isinstance(section, Section) and section.href != '': href = section.href elif isinstance(section, Link): href = section.href nc = etree.SubElement(np, 'content', {'src': href}) uid = _create_section(np, subsection, uid + 1) elif isinstance(item, Link): _parent = itm _content = _parent.find('content') if _content is not None: if _content.get('src') == '': _content.set('src', item.href) np = etree.SubElement(itm, 'navPoint', {'id': item.uid}) if self._play_order['enabled']: _add_play_order(np) nl = etree.SubElement(np, 'navLabel') nt = etree.SubElement(nl, 'text') nt.text = item.title nc = etree.SubElement(np, 'content', {'src': item.href}) elif isinstance(item, EpubHtml): _parent = itm _content = _parent.find('content') if _content is not None: if _content.get('src') == '': _content.set('src', item.file_name) np = etree.SubElement(itm, 'navPoint', {'id': item.get_id()}) if self._play_order['enabled']: _add_play_order(np) nl = etree.SubElement(np, 'navLabel') nt = etree.SubElement(nl, 'text') nt.text = item.title nc = etree.SubElement(np, 'content', {'src': item.file_name}) return uid _create_section(nav_map, self.book.toc, 0) tree_str = etree.tostring(root, pretty_print=True, encoding='utf-8', xml_declaration=True) return tree_str def _write_items(self): for item in self.book.get_items(): if isinstance(item, EpubNcx): self.out.writestr('%s/%s' % (self.book.FOLDER_NAME, item.file_name), self._get_ncx()) elif isinstance(item, EpubNav): self.out.writestr('%s/%s' % (self.book.FOLDER_NAME, item.file_name), self._get_nav(item)) elif item.manifest: self.out.writestr('%s/%s' % (self.book.FOLDER_NAME, item.file_name), item.get_content()) else: self.out.writestr('%s' % item.file_name, item.get_content()) def write(self): # check for the option allowZip64 self.out = zipfile.ZipFile(self.file_name, 'w', zipfile.ZIP_DEFLATED, 
compresslevel=self.options['compresslevel']) self.out.writestr('mimetype', 'application/epub+zip', compress_type=zipfile.ZIP_STORED) self._write_container() self._write_opf() self._write_items() self.out.close() class EpubReader(object): DEFAULT_OPTIONS = { 'ignore_ncx': False } def __init__(self, epub_file_name, options=None): self.file_name = epub_file_name self.book = EpubBook() self.zf = None self.opf_file = '' self.opf_dir = '' self.options = dict(self.DEFAULT_OPTIONS) if options: self.options.update(options) self._check_deprecated() def _check_deprecated(self): if self.options.get('ignore_ncx') is None: warnings.warn('In the future version we will turn default option ignore_ncx to True.') def process(self): # should cache this html parsing so we don't do it for every plugin for plg in self.options.get('plugins', []): if hasattr(plg, 'after_read'): plg.after_read(self.book) for item in self.book.get_items(): if isinstance(item, EpubHtml): for plg in self.options.get('plugins', []): if hasattr(plg, 'html_after_read'): plg.html_after_read(self.book, item) def load(self): self._load() return self.book def read_file(self, name): # Raises KeyError name = zip_path.normpath(name) return self.zf.read(name) def _load_container(self): meta_inf = self.read_file('META-INF/container.xml') tree = parse_string(meta_inf) for root_file in tree.findall('.//xmlns:rootfile[@media-type]', namespaces={'xmlns': NAMESPACES['CONTAINERNS']}): if root_file.get('media-type') == 'application/oebps-package+xml': self.opf_file = root_file.get('full-path') self.opf_dir = zip_path.dirname(self.opf_file) def _load_metadata(self): container_root = self.container.getroot() # get epub version self.book.version = container_root.get('version', None) # get unique-identifier if container_root.get('unique-identifier', None): self.book.IDENTIFIER_ID = container_root.get('unique-identifier') # get xml:lang # get metadata metadata = self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'metadata')) nsmap = 
metadata.nsmap nstags = dict((k, '{%s}' % v) for k, v in six.iteritems(nsmap)) default_ns = nstags.get(None, '') nsdict = dict((v, {}) for v in nsmap.values()) def add_item(ns, tag, value, extra): if ns not in nsdict: nsdict[ns] = {} values = nsdict[ns].setdefault(tag, []) values.append((value, extra)) for t in metadata: if not etree.iselement(t) or t.tag is etree.Comment: continue if t.tag == default_ns + 'meta': name = t.get('name') others = dict((k, v) for k, v in t.items()) if name and ':' in name: prefix, name = name.split(':', 1) else: prefix = None add_item(t.nsmap.get(prefix, prefix), name, t.text, others) else: tag = t.tag[t.tag.rfind('}') + 1:] if (t.prefix and t.prefix.lower() == 'dc') and tag == 'identifier': _id = t.get('id', None) if _id: self.book.IDENTIFIER_ID = _id others = dict((k, v) for k, v in t.items()) add_item(t.nsmap[t.prefix], tag, t.text, others) self.book.metadata = nsdict titles = self.book.get_metadata('DC', 'title') if len(titles) > 0: self.book.title = titles[0][0] for value, others in self.book.get_metadata('DC', 'identifier'): if others.get('id') == self.book.IDENTIFIER_ID: self.book.uid = value def _load_manifest(self): for r in self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'manifest')): if r is not None and r.tag != '{%s}item' % NAMESPACES['OPF']: continue media_type = r.get('media-type') _properties = r.get('properties', '') if _properties: properties = _properties.split(' ') else: properties = [] # people use wrong content types if media_type == 'image/jpg': media_type = 'image/jpeg' if media_type == 'application/x-dtbncx+xml': ei = EpubNcx(uid=r.get('id'), file_name=unquote(r.get('href'))) ei.content = self.read_file(zip_path.join(self.opf_dir, ei.file_name)) elif media_type == 'application/smil+xml': ei = EpubSMIL(uid=r.get('id'), file_name=unquote(r.get('href'))) ei.content = self.read_file(zip_path.join(self.opf_dir, ei.file_name)) elif media_type == 'application/xhtml+xml': if 'nav' in properties: ei = 
EpubNav(uid=r.get('id'), file_name=unquote(r.get('href'))) ei.content = self.read_file(zip_path.join(self.opf_dir, r.get('href'))) elif 'cover' in properties: ei = EpubCoverHtml() ei.content = self.read_file(zip_path.join(self.opf_dir, unquote(r.get('href')))) else: ei = EpubHtml() ei.id = r.get('id') ei.file_name = unquote(r.get('href')) ei.media_type = media_type ei.media_overlay = r.get('media-overlay', None) ei.media_duration = r.get('duration', None) ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name())) ei.properties = properties elif media_type in IMAGE_MEDIA_TYPES: if 'cover-image' in properties: ei = EpubCover(uid=r.get('id'), file_name=unquote(r.get('href'))) ei.media_type = media_type ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name())) else: ei = EpubImage() ei.id = r.get('id') ei.file_name = unquote(r.get('href')) ei.media_type = media_type ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name())) else: # different types ei = EpubItem() ei.id = r.get('id') ei.file_name = unquote(r.get('href')) ei.media_type = media_type ei.content = self.read_file(zip_path.join(self.opf_dir, ei.get_name())) self.book.add_item(ei) def _parse_ncx(self, data): tree = parse_string(data) tree_root = tree.getroot() nav_map = tree_root.find('{%s}navMap' % NAMESPACES['DAISY']) def _get_children(elems, n, nid): label, content = '', '' children = [] for a in elems.getchildren(): if a.tag == '{%s}navLabel' % NAMESPACES['DAISY']: label = a.getchildren()[0].text if a.tag == '{%s}content' % NAMESPACES['DAISY']: content = a.get('src', '') if a.tag == '{%s}navPoint' % NAMESPACES['DAISY']: children.append(_get_children(a, n + 1, a.get('id', ''))) if len(children) > 0: if n == 0: return children return (Section(label, href=content), children) else: return Link(content, label, nid) self.book.toc = _get_children(nav_map, 0, '') def _parse_nav(self, data, base_path, navtype='toc'): html_node = parse_html_string(data) if navtype == 'toc': # 
parsing the table of contents nav_node = html_node.xpath("//nav[@*='toc']")[0] else: # parsing the list of pages _page_list = html_node.xpath("//nav[@*='page-list']") if len(_page_list) == 0: return nav_node = _page_list[0] def parse_list(list_node): items = [] for item_node in list_node.findall('li'): sublist_node = item_node.find('ol') link_node = item_node.find('a') if sublist_node is not None: title = item_node[0].text_content() children = parse_list(sublist_node) if link_node is not None: href = zip_path.normpath(zip_path.join(base_path, link_node.get('href'))) items.append((Section(title, href=href), children)) else: items.append((Section(title), children)) elif link_node is not None: title = link_node.text_content() href = zip_path.normpath(zip_path.join(base_path, link_node.get('href'))) items.append(Link(href, title)) return items if navtype == 'toc': self.book.toc = parse_list(nav_node.find('ol')) elif nav_node is not None: # generate the pages list if there is one self.book.pages = parse_list(nav_node.find('ol')) # generate the per-file pages lists # because of the order of parsing the files, this can't be done # when building the EpubHtml objects htmlfiles = dict() for htmlfile in self.book.items: if isinstance(htmlfile, EpubHtml): htmlfiles[htmlfile.file_name] = htmlfile for page in self.book.pages: try: (filename, idref) = page.href.split('#') except ValueError: filename = page.href if filename in htmlfiles: htmlfiles[filename].pages.append(page) def _load_spine(self): spine = self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'spine')) self.book.spine = [(t.get('idref'), t.get('linear', 'yes')) for t in spine] toc = spine.get('toc', '') self.book.set_direction(spine.get('page-progression-direction', None)) # should read ncx or nav file nav_item = next((item for item in self.book.items if isinstance(item, EpubNav)), None) if toc: if not self.options.get('ignore_ncx') or not nav_item: try: ncxFile = self.read_file(zip_path.join(self.opf_dir, 
self.book.get_item_with_id(toc).get_name())) except KeyError: raise EpubException(-1, 'Can not find ncx file.') self._parse_ncx(ncxFile) def _load_guide(self): guide = self.container.find('{%s}%s' % (NAMESPACES['OPF'], 'guide')) if guide is not None: self.book.guide = [{'href': t.get('href'), 'title': t.get('title'), 'type': t.get('type')} for t in guide] def _load_opf_file(self): try: s = self.read_file(self.opf_file) except KeyError: raise EpubException(-1, 'Can not find container file') self.container = parse_string(s) self._load_metadata() self._load_manifest() self._load_spine() self._load_guide() # read nav file if found # nav_item = next((item for item in self.book.items if isinstance(item, EpubNav)), None) if nav_item: if self.options.get('ignore_ncx') or not self.book.toc: self._parse_nav( nav_item.content, zip_path.dirname(nav_item.file_name), navtype='toc' ) self._parse_nav( nav_item.content, zip_path.dirname(nav_item.file_name), navtype='pages' ) def _load(self): if os.path.isdir(self.file_name): file_name = self.file_name class Directory: def read(self, subname): with open(os.path.join(file_name, subname), 'rb') as fp: return fp.read() def close(self): pass self.zf = Directory() else: try: self.zf = zipfile.ZipFile(self.file_name, 'r', compression=zipfile.ZIP_DEFLATED, allowZip64=True) except zipfile.BadZipfile as bz: raise EpubException(0, 'Bad Zip file') except zipfile.LargeZipFile as bz: raise EpubException(1, 'Large Zip file') # 1st check metadata self._load_container() self._load_opf_file() self.zf.close() # WRITE def write_epub(name, book, options=None): """ Creates epub file with the content defined in EpubBook. 
>>> ebooklib.write_epub('book.epub', book) :Args: - name: file name for the output file - book: instance of EpubBook - options: extra opions as dictionary (optional) """ epub = EpubWriter(name, book, options) epub.process() try: epub.write() except IOError: warnings.warn('In the future throwing exceptions while writing will be default behavior.') t, v, tb = sys.exc_info() if options and options.get('raise_exceptions'): six.reraise(t, v, tb) else: return False return True # READ def read_epub(name, options=None): """ Creates new instance of EpubBook with the content defined in the input file. >>> book = ebooklib.read_epub('book.epub') :Args: - name: full path to the input file - options: extra options as dictionary (optional) :Returns: Instance of EpubBook. """ reader = EpubReader(name, options) book = reader.load() reader.process() return book
58,456
Python
.py
1,335
31.545318
244
0.539781
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,981
sourcecode.py
aerkalov_ebooklib/ebooklib/plugins/sourcecode.py
# This file is part of EbookLib.
# Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com>
#
# EbookLib is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EbookLib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with EbookLib. If not, see <http://www.gnu.org/licenses/>.

from ebooklib.plugins.base import BasePlugin
from ebooklib.utils import parse_html_string


class SourceHighlighter(BasePlugin):
    """Syntax-highlight embedded source listings with Pygments.

    Scans each chapter for ``<pre class="source-...">`` elements and, for
    the languages it knows about (Python and CSS), replaces the element
    with Pygments-generated markup before the chapter is written.  When a
    replacement happened, a link to ``style/code.css`` is added so the
    Pygments classes get styled.
    """

    def __init__(self):
        pass

    def html_before_write(self, book, chapter):
        """Rewrite *chapter* content in place; no return value on success.

        Silently returns when the chapter content cannot be parsed, which
        leaves the chapter unchanged (matches the plugin's best-effort
        contract).
        """
        # Heavy third-party imports stay local so they are only required
        # when the plugin is actually used.
        from lxml import etree, html
        from pygments import highlight
        from pygments.formatters import HtmlFormatter

        try:
            tree = parse_html_string(chapter.content)
        except:
            return

        root = tree.getroottree()
        had_source = False

        if len(root.find('body')) != 0:
            body = tree.find('body')

            # check for embedded source listings
            for source in body.xpath('//pre[contains(@class,"source-")]'):
                css_class = source.get('class')
                # BUGFIX: html.tostring() returns bytes on Python 3;
                # ask for unicode so joining with source.text cannot
                # raise TypeError.
                source_text = (source.text or '') + ''.join(
                    [html.tostring(child, encoding='unicode')
                     for child in source.iterchildren()])

                _text = None

                if 'source-python' in css_class:
                    from pygments.lexers import PythonLexer
                    # _text = highlight(source_text, PythonLexer(), HtmlFormatter(linenos="inline"))
                    _text = highlight(source_text, PythonLexer(), HtmlFormatter())

                if 'source-css' in css_class:
                    from pygments.lexers import CssLexer
                    _text = highlight(source_text, CssLexer(), HtmlFormatter())

                # BUGFIX: the original used _text unconditionally, raising
                # NameError (or reusing the previous listing's output) for
                # unsupported languages such as "source-java".  Skip them.
                if _text is None:
                    continue

                _parent = source.getparent()
                _parent.replace(source, etree.XML(_text))
                had_source = True

        if had_source:
            chapter.add_link(href="style/code.css", rel="stylesheet", type="text/css")
            chapter.content = etree.tostring(tree, pretty_print=True, encoding='utf-8')
2,481
Python
.py
50
40.58
118
0.646498
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,982
standard.py
aerkalov_ebooklib/ebooklib/plugins/standard.py
# This file is part of EbookLib.
# Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com>
#
# EbookLib is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EbookLib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with EbookLib. If not, see <http://www.gnu.org/licenses/>.

import six

from ebooklib.plugins.base import BasePlugin
from ebooklib.utils import parse_html_string

# TODO:
# - should also look for the _required_ elements
# http://www.w3.org/html/wg/drafts/html/master/tabular-data.html#the-table-element

# Attributes that are allowed on every element.
ATTRIBUTES_GLOBAL = ['accesskey', 'class', 'contenteditable', 'contextmenu', 'dir',
                     'draggable', 'dropzone', 'hidden', 'id', 'inert', 'itemid',
                     'itemprop', 'itemref', 'itemscope', 'itemtype', 'lang',
                     'spellcheck', 'style', 'tabindex', 'title', 'translate',
                     'epub:type']

# Remove <u> for now from here
DEPRECATED_TAGS = ['acronym', 'applet', 'basefont', 'big', 'center', 'dir', 'font',
                   'frame', 'frameset', 'isindex', 'noframes', 's', 'strike', 'tt']


def leave_only(item, tag_list):
    """Delete every attribute of *item* that is not listed in *tag_list*.

    BUGFIX: iterate over a snapshot of the keys -- deleting from
    ``item.attrib`` while iterating over it skips entries (or raises,
    depending on the mapping implementation).
    """
    for _attr in list(item.attrib.keys()):
        if _attr not in tag_list:
            del item.attrib[_attr]


class SyntaxPlugin(BasePlugin):
    """Sanitize chapter HTML for EPUB3: strip deprecated tags and keep only
    the attributes each element is allowed to carry."""

    NAME = 'Check HTML syntax'

    def html_before_write(self, book, chapter):
        """Clean *chapter* content in place and return the new content.

        Returns None (chapter untouched) when the content cannot be parsed.
        Side effects: may append to ``chapter.properties`` ('svg',
        'remote-resources') and add remote images to *book*.
        """
        from lxml import etree

        try:
            tree = parse_html_string(chapter.content)
        except:
            return

        root = tree.getroottree()

        # delete deprecated tags
        # i should really have a list of allowed tags
        for tag in DEPRECATED_TAGS:
            etree.strip_tags(root, tag)

        head = tree.find('head')
        if head is not None and len(head) != 0:
            # BUGFIX: iterate over a copy of the children -- <title> and
            # <meta> elements may be removed from head inside the loop,
            # and mutating while iterating would skip their siblings.
            for _item in list(head):
                if _item.tag == 'base':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['href', 'target'])
                elif _item.tag == 'link':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['href', 'crossorigin', 'rel', 'media', 'hreflang', 'type', 'sizes'])
                elif _item.tag == 'title':
                    if _item.text == '':
                        head.remove(_item)
                elif _item.tag == 'meta':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['name', 'http-equiv', 'content', 'charset'])
                    # just remove for now, but really should not be like this
                    head.remove(_item)
                elif _item.tag == 'script':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['src', 'type', 'charset', 'async', 'defer', 'crossorigin'])
                elif _item.tag == 'source':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['src', 'type', 'media'])
                elif _item.tag == 'style':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['media', 'type', 'scoped'])
                else:
                    leave_only(_item, ATTRIBUTES_GLOBAL)

        if len(root.find('body')) != 0:
            body = tree.find('body')

            for _item in body.iter():
                # it is not
                # <a class="indexterm" href="ch05.html#ix_epub:trigger_element">
                if _item.tag == 'a':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['href', 'target', 'download', 'rel', 'hreflang', 'type'])
                elif _item.tag == 'area':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['alt', 'coords', 'shape', 'href', 'target', 'download', 'rel', 'hreflang', 'type'])
                elif _item.tag == 'audio':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['src', 'crossorigin', 'preload', 'autoplay', 'mediagroup', 'loop', 'muted', 'controls'])
                elif _item.tag == 'blockquote':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['cite'])
                elif _item.tag == 'button':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['autofocus', 'disabled', 'form', 'formaction', 'formenctype', 'formmethod', 'formnovalidate', 'formtarget', 'name', 'type', 'value', 'menu'])
                elif _item.tag == 'canvas':
                    # (duplicate 'canvas' branch from the original removed --
                    # it was unreachable dead code)
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['width', 'height'])
                elif _item.tag == 'del':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['cite', 'datetime'])
                elif _item.tag == 'details':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['open'])
                elif _item.tag == 'embed':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['src', 'type', 'width', 'height'])
                elif _item.tag == 'fieldset':
                    # BUGFIX: the spec attribute is 'disabled', not 'disable'
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['disabled', 'form', 'name'])
                elif _item.tag == 'form':
                    # BUGFIX: this branch was written as a second (dead)
                    # 'details' test, but the whitelist is clearly the one
                    # for <form>.
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['accept-charset', 'action', 'autocomplete', 'enctype', 'method', 'name', 'novalidate', 'target'])
                elif _item.tag == 'iframe':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['src', 'srcdoc', 'name', 'sandbox', 'seamless', 'allowfullscreen', 'width', 'height'])
                elif _item.tag == 'img':
                    _src = _item.get('src', '').lower()

                    if _src.startswith('http://') or _src.startswith('https://'):
                        if 'remote-resources' not in chapter.properties:
                            chapter.properties.append('remote-resources')

                        # THIS DOES NOT WORK, ONLY VIDEO AND AUDIO FILES CAN BE REMOTE RESOURCES
                        # THAT MEANS I SHOULD ALSO CATCH <SOURCE TAG
                        from ebooklib import epub
                        _img = epub.EpubImage(file_name=_item.get('src'))
                        book.add_item(_img)

                    leave_only(_item, ATTRIBUTES_GLOBAL + ['alt', 'src', 'crossorigin', 'usemap', 'ismap', 'width', 'height'])
                elif _item.tag == 'input':
                    # BUGFIX: the original had "'step' 'type'" (missing
                    # comma), which implicit string concatenation turned
                    # into the bogus attribute 'steptype' -- so both step=
                    # and type= were stripped from every <input>.
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['accept', 'alt', 'autocomplete', 'autofocus', 'checked', 'dirname', 'disabled', 'form',
                                                           'formaction', 'formenctype', 'formmethod', 'formnovalidate', 'formtarget', 'height',
                                                           'inputmode', 'list', 'max', 'maxlength', 'min', 'multiple', 'name', 'pattern',
                                                           'placeholder', 'readonly', 'required', 'size', 'src', 'step', 'type', 'value', 'width'])
                elif _item.tag == 'ins':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['cite', 'datetime'])
                elif _item.tag == 'keygen':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['autofocus', 'challenge', 'disabled', 'form', 'keytype', 'name'])
                elif _item.tag == 'label':
                    # (duplicate 'label' branch from the original removed --
                    # it was unreachable dead code)
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['form', 'for'])
                elif _item.tag == 'map':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['name'])
                elif _item.tag == 'menu':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['type', 'label'])
                elif _item.tag == 'object':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['data', 'type', 'typemustmatch', 'name', 'usemap', 'form', 'width', 'height'])
                elif _item.tag == 'ol':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['reversed', 'start', 'type'])
                elif _item.tag == 'optgroup':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['disabled', 'label'])
                elif _item.tag == 'option':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['disabled', 'label', 'selected', 'value'])
                elif _item.tag == 'output':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['for', 'form', 'name'])
                elif _item.tag == 'param':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['name', 'value'])
                elif _item.tag == 'progress':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['value', 'max'])
                elif _item.tag == 'q':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['cite'])
                elif _item.tag == 'select':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['autofocus', 'disabled', 'form', 'multiple', 'name', 'required', 'size'])
                elif _item.tag == 'table':
                    if _item.get('border', None):
                        if _item.get('border') == '0':
                            _item.set('border', '')
                    if _item.get('summary', None):
                        _caption = etree.Element('caption', {})
                        _caption.text = _item.get('summary')
                        _item.insert(0, _caption)
                        # add it as caption
                        del _item.attrib['summary']
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['border', 'sortable'])
                elif _item.tag == 'dl':
                    _d = _item.find('dd')
                    if _d is not None and len(_d) == 0:
                        pass
                    # http://html5doctor.com/the-dl-element/
                    # should be like this really
                    # some of the elements can be missing
                    # dl
                    #   dt
                    #   dd
                    #   dt
                    #   dd
                elif _item.tag == 'td':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['colspan', 'rowspan', 'headers'])
                elif _item.tag == 'textarea':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['autocomplete', 'autofocus', 'cols', 'dirname', 'disabled', 'form', 'inputmode',
                                                           'maxlength', 'name', 'placeholder', 'readonly', 'required', 'rows', 'wrap'])
                elif _item.tag in ['col', 'colgroup']:
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['span'])
                elif _item.tag == 'th':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['colspan', 'rowspan', 'headers', 'scope', 'abbr', 'sorted'])
                elif _item.tag in ['time']:
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['datetime'])
                elif _item.tag in ['track']:
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['kind', 'src', 'srclang', 'label', 'default'])
                elif _item.tag == 'video':
                    leave_only(_item, ATTRIBUTES_GLOBAL + ['src', 'crossorigin', 'poster', 'preload', 'autoplay', 'mediagroup', 'loop',
                                                           'muted', 'controls', 'width', 'height'])
                elif _item.tag == 'svg':
                    # We need to add property "svg" in case we have embeded svg file
                    if 'svg' not in chapter.properties:
                        chapter.properties.append('svg')
                    if _item.get('viewbox', None):
                        del _item.attrib['viewbox']
                    if _item.get('preserveaspectratio', None):
                        del _item.attrib['preserveaspectratio']
                else:
                    # BUGFIX: snapshot the keys before deleting (same
                    # iterate-while-mutating problem as in leave_only).
                    for _attr in list(_item.attrib.keys()):
                        if _attr not in ATTRIBUTES_GLOBAL:
                            del _item.attrib[_attr]

        chapter.content = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True)

        return chapter.content
12,534
Python
.py
200
43.785
156
0.492808
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,983
base.py
aerkalov_ebooklib/ebooklib/plugins/base.py
# This file is part of EbookLib. # Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com> # # EbookLib is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # EbookLib is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with EbookLib. If not, see <http://www.gnu.org/licenses/>. class BasePlugin(object): def before_write(self, book): "Processing before save" return True def after_write(self, book): "Processing after save" return True def before_read(self, book): "Processing before save" return True def after_read(self, book): "Processing after save" return True def item_after_read(self, book, item): "Process general item after read." return True def item_before_write(self, book, item): "Process general item before write." return True def html_after_read(self, book, chapter): "Processing HTML before read." return True def html_before_write(self, book, chapter): "Processing HTML before save." return True
1,546
Python
.py
40
33.425
77
0.706079
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,984
booktype.py
aerkalov_ebooklib/ebooklib/plugins/booktype.py
# This file is part of EbookLib. # Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com> # # EbookLib is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # EbookLib is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with EbookLib. If not, see <http://www.gnu.org/licenses/>. from ebooklib.plugins.base import BasePlugin from ebooklib.utils import parse_html_string class BooktypeLinks(BasePlugin): NAME = 'Booktype Links' def __init__(self, booktype_book): self.booktype_book = booktype_book def html_before_write(self, book, chapter): from lxml import etree try: from urlparse import urlparse, urljoin except ImportError: from urllib.parse import urlparse, urljoin try: tree = parse_html_string(chapter.content) except: return root = tree.getroottree() if len(root.find('body')) != 0: body = tree.find('body') # should also be aware to handle # ../chapter/ # ../chapter/#reference # ../chapter#reference for _link in body.xpath('//a'): # This is just temporary for the footnotes if _link.get('href', '').find('InsertNoteID') != -1: _ln = _link.get('href', '') i = _ln.find('#') _link.set('href', _ln[i:]) continue _u = urlparse(_link.get('href', '')) # Let us care only for internal links at the moment if _u.scheme == '': if _u.path != '': _link.set('href', '%s.xhtml' % _u.path) if _u.fragment != '': _link.set('href', urljoin(_link.get('href'), '#%s' % _u.fragment)) if _link.get('name') != None: _link.set('id', _link.get('name')) etree.strip_attributes(_link, 'name') chapter.content = etree.tostring(tree, pretty_print=True, 
encoding='utf-8') class BooktypeFootnotes(BasePlugin): NAME = 'Booktype Footnotes' def __init__(self, booktype_book): self.booktype_book = booktype_book def html_before_write(self, book, chapter): from lxml import etree from ebooklib import epub try: tree = parse_html_string(chapter.content) except: return root = tree.getroottree() if len(root.find('body')) != 0: body = tree.find('body') # <span id="InsertNoteID_1_marker1" class="InsertNoteMarker"><sup><a href="#InsertNoteID_1">1</a></sup><span> # <ol id="InsertNote_NoteList"><li id="InsertNoteID_1">prvi footnote <span id="InsertNoteID_1_LinkBacks"><sup><a href="#InsertNoteID_1_marker1">^</a></sup></span></li> # <a epub:type="noteref" href="#n1">1</a></p> # <aside epub:type="footnote" id="n1"><p>These have been corrected in this EPUB3 edition.</p></aside> for footnote in body.xpath('//span[@class="InsertNoteMarker"]'): footnote_id = footnote.get('id')[:-8] a = footnote.getchildren()[0].getchildren()[0] footnote_text = body.xpath('//li[@id="%s"]' % footnote_id)[0] a.attrib['{%s}type' % epub.NAMESPACES['EPUB']] = 'noteref' ftn = etree.SubElement(body, 'aside', {'id': footnote_id}) ftn.attrib['{%s}type' % epub.NAMESPACES['EPUB']] = 'footnote' ftn_p = etree.SubElement(ftn, 'p') ftn_p.text = footnote_text.text old_footnote = body.xpath('//ol[@id="InsertNote_NoteList"]') if len(old_footnote) > 0: body.remove(old_footnote[0]) chapter.content = etree.tostring(tree, pretty_print=True, encoding='utf-8')
4,359
Python
.py
87
38.850575
179
0.579245
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,985
tidyhtml.py
aerkalov_ebooklib/ebooklib/plugins/tidyhtml.py
# This file is part of EbookLib. # Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com> # # EbookLib is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # EbookLib is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with EbookLib. If not, see <http://www.gnu.org/licenses/>. import six import subprocess from ebooklib.plugins.base import BasePlugin from ebooklib.utils import parse_html_string # Recommend usage of # - https://github.com/w3c/tidy-html5 def tidy_cleanup(content, **extra): cmd = [] for k, v in six.iteritems(extra): if v: cmd.append('--%s' % k) cmd.append(v) else: cmd.append('-%s' % k) # must parse all other extra arguments try: p = subprocess.Popen(['tidy']+cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) except OSError: return (3, None) p.stdin.write(content) (cont, p_err) = p.communicate() # 0 - all ok # 1 - there were warnings # 2 - there were errors # 3 - exception return (p.returncode, cont) class TidyPlugin(BasePlugin): NAME = 'Tidy HTML' OPTIONS = {'char-encoding': 'utf8', 'tidy-mark': 'no' } def __init__(self, extra = {}): self.options = dict(self.OPTIONS) self.options.update(extra) def html_before_write(self, book, chapter): if not chapter.content: return None (_, chapter.content) = tidy_cleanup(chapter.content, **self.options) return chapter.content def html_after_read(self, book, chapter): if not chapter.content: return None (_, chapter.content) = tidy_cleanup(chapter.content, **self.options) return chapter.content
2,317
Python
.py
61
31.508197
77
0.658166
aerkalov/ebooklib
1,451
226
102
AGPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,986
setup.py
stan-dev_pystan2/setup.py
#!/usr/bin/env python #----------------------------------------------------------------------------- # Copyright (c) 2013-2015, PyStan developers # # This file is licensed under Version 3.0 of the GNU General Public # License. See LICENSE for a text of the license. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # This file is part of PyStan. # # PyStan is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # PyStan is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with PyStan. If not, see <http://www.gnu.org/licenses/>. 
#----------------------------------------------------------------------------- import ast import codecs import os import platform import shutil import subprocess import sys LONG_DESCRIPTION = codecs.open('README.rst', encoding='utf-8').read() NAME = 'pystan' DESCRIPTION = 'Python interface to Stan, a package for Bayesian inference' AUTHOR = 'PyStan Developers' AUTHOR_EMAIL = 'stan-users@googlegroups.com' URL = 'https://github.com/stan-dev/pystan' LICENSE = 'GPLv3' CLASSIFIERS = [ 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Programming Language :: Cython', 'Development Status :: 4 - Beta', 'Environment :: Console', 'Operating System :: OS Independent', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Information Analysis' ] # VersionFinder from from django-compressor class VersionFinder(ast.NodeVisitor): def __init__(self): self.version = None def visit_Assign(self, node): if node.targets[0].id == '__version__': self.version = node.value.s def read(*parts): filename = os.path.join(os.path.dirname(__file__), *parts) with codecs.open(filename, encoding='utf-8') as fp: return fp.read() def find_version(*parts): finder = VersionFinder() finder.visit(ast.parse(read(*parts))) return finder.version ############################################################################### def build_tbb(): """Build tbb.""" stan_math_lib = os.path.abspath(os.path.join(os.path.dirname(__file__), 'pystan', 'stan', 'lib', 'stan_math', 'lib')) make = 'make' if platform.system() != 'Windows' else 'mingw32-make' cmd = [make] tbb_root = os.path.join(stan_math_lib, 'tbb_2019_U8').replace("\\", "/") cmd.extend(['-C', tbb_root]) cmd.append('tbb_build_dir={}'.format(stan_math_lib)) cmd.append('tbb_build_prefix=tbb') cmd.append('tbb_root={}'.format(tbb_root)) 
cmd.append('stdver=c++14') cmd.append('compiler=gcc') cwd = os.path.abspath(os.path.dirname(__file__)) subprocess.check_call(cmd, cwd=cwd) tbb_debug = os.path.join(stan_math_lib, "tbb_debug") tbb_release = os.path.join(stan_math_lib, "tbb_release") tbb_dir = os.path.join(stan_math_lib, "tbb") if not os.path.exists(tbb_dir): os.makedirs(tbb_dir) if os.path.exists(tbb_debug): shutil.rmtree(tbb_debug) shutil.move(os.path.join(tbb_root, 'include'), tbb_dir) shutil.rmtree(tbb_root) for name in os.listdir(tbb_release): srcname = os.path.join(tbb_release, name) dstname = os.path.join(tbb_dir, name) shutil.move(srcname, dstname) if os.path.exists(tbb_release): shutil.rmtree(tbb_release) ############################################################################### # Optional setuptools features # We need to import setuptools early, if we want setuptools features, # as it monkey-patches the 'setup' function # For some commands, use setuptools if len(set(('develop', 'release', 'bdist_egg', 'bdist_rpm', 'bdist_wininst', 'install_egg_info', 'build_sphinx', 'egg_info', 'easy_install', 'upload', 'bdist_wheel', '--single-version-externally-managed', )).intersection(sys.argv)) > 0: import setuptools extra_setuptools_args = dict( install_requires=['Cython>=0.22,!=0.25.1', 'numpy >= 1.7'], zip_safe=False, # the package can run out of an .egg file include_package_data=True, ) else: extra_setuptools_args = dict() ############################################################################### from distutils.errors import CCompilerError, DistutilsError from distutils.extension import Extension stan_include_dirs = ['pystan/stan/src', 'pystan/stan/lib/stan_math/', 'pystan/stan/lib/stan_math/lib/eigen_3.3.3', 'pystan/stan/lib/stan_math/lib/boost_1.72.0', 'pystan/stan/lib/stan_math/lib/sundials_4.1.0/include', 'pystan/stan/lib/stan_math/lib/tbb/include'] stan_macros = [ ('BOOST_DISABLE_ASSERTS', None), ('BOOST_NO_DECLTYPE', None), ('BOOST_PHOENIX_NO_VARIADIC_EXPRESSION', None), # needed 
for stanc ('BOOST_RESULT_OF_USE_TR1', None), ('FUSION_MAX_VECTOR_SIZE', 12), # for parser, stan-dev/pystan#222 ] extra_compile_args = [ '-Os', '-ftemplate-depth-256', '-Wno-unused-function', '-Wno-uninitialized', '-std=c++1y', ] if platform.system() == 'Windows': from Cython.Build.Inline import _get_build_extension if _get_build_extension().compiler in (None, 'msvc'): print("Warning: MSVC is not supported") extra_compile_args = [ '/EHsc', '-DBOOST_DATE_TIME_NO_LIB', '/std:c++14', ] else: # fix bug in MingW-W64 # use posix threads extra_compile_args.extend([ "-D_hypot=hypot", "-pthread", "-fexceptions", ]) stanc_sources = [ "pystan/stan/src/stan/lang/ast_def.cpp", "pystan/stan/src/stan/lang/grammars/bare_type_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/block_var_decls_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/expression07_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/expression_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/functions_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/indexes_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/local_var_decls_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/program_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/semantic_actions_def.cpp", "pystan/stan/src/stan/lang/grammars/statement_2_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/statement_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/term_grammar_inst.cpp", "pystan/stan/src/stan/lang/grammars/whitespace_grammar_inst.cpp", ] extensions = [ Extension("pystan._api", ["pystan/_api.pyx"] + stanc_sources, language='c++', define_macros=stan_macros, include_dirs=stan_include_dirs, extra_compile_args=extra_compile_args), Extension("pystan._chains", ["pystan/_chains.pyx"], language='c++', define_macros=stan_macros, include_dirs=stan_include_dirs, extra_compile_args=extra_compile_args), # _misc.pyx does not use Stan libs Extension("pystan._misc", ["pystan/_misc.pyx"], language='c++', 
extra_compile_args=extra_compile_args) ] ## package data package_data_pats = ['*.hpp', '*.pxd', '*.pyx', 'tests/data/*.csv', 'tests/data/*.stan', 'lookuptable/*.txt'] # Build tbb before setup if needed tbb_dir = os.path.join(os.path.dirname(__file__), 'pystan', 'stan', 'lib', 'stan_math', 'lib', 'tbb') tbb_dir = os.path.abspath(tbb_dir) if not os.path.exists(tbb_dir): build_tbb() # get every file under pystan/stan/src and pystan/stan/lib stan_files_all = sum( [[os.path.join(path.replace('pystan/', ''), fn) for fn in files] for path, dirs, files in os.walk('pystan/stan/src/')], []) lib_files_all = sum( [[os.path.join(path.replace('pystan/', ''), fn) for fn in files] for path, dirs, files in os.walk('pystan/stan/lib/')], []) package_data_pats += stan_files_all package_data_pats += lib_files_all def setup_package(): metadata = dict(name=NAME, version=find_version("pystan", "__init__.py"), maintainer=AUTHOR, maintainer_email=AUTHOR_EMAIL, packages=['pystan', 'pystan.tests', 'pystan.experimental', 'pystan.external', 'pystan.external.pymc', 'pystan.external.enum', 'pystan.external.scipy'], ext_modules=extensions, package_data={'pystan': package_data_pats}, platforms='any', description=DESCRIPTION, license=LICENSE, url=URL, long_description=LONG_DESCRIPTION, long_description_content_type='text/x-rst', classifiers=CLASSIFIERS, **extra_setuptools_args) if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): # For these actions, neither Numpy nor Cython is required. # # They are required to succeed when pip is used to install PyStan # when, for example, Numpy is not yet present. 
try: from setuptools import setup except ImportError: from distutils.core import setup dist = setup(**metadata) else: import distutils.core distutils.core._setup_stop_after = 'commandline' from distutils.core import setup try: from Cython.Build import cythonize # FIXME: if header only works, no need for numpy.distutils at all from numpy.distutils.command import install except ImportError: raise SystemExit("Cython>=0.22 and NumPy are required.") metadata['ext_modules'] = cythonize(extensions) dist = setup(**metadata) metadata['cmdclass'] = {'install': install.install} try: dist.run_commands() except KeyboardInterrupt: raise SystemExit("Interrupted") except (IOError, os.error) as exc: from distutils.util import grok_environment_error error = grok_environment_error(exc) except (DistutilsError, CCompilerError) as msg: raise SystemExit("error: " + str(msg)) if __name__ == '__main__': setup_package()
11,312
Python
.py
262
35.591603
121
0.597019
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,987
_compat.py
stan-dev_pystan2/pystan/_compat.py
# -*- coding: utf-8 -*- """ py2/py3 compatibility support drawn from jinja2 see http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/ """ import sys PY2 = sys.version_info[0] == 2 _identity = lambda x: x if not PY2: unichr = chr range_type = range text_type = str string_types = (str,) implements_to_string = _identity ifilter = filter imap = map izip = zip else: unichr = unichr text_type = unicode range_type = xrange string_types = (str, unicode) def implements_to_string(cls): cls.__unicode__ = cls.__str__ cls.__str__ = lambda x: x.__unicode__().encode('utf-8') return cls from itertools import imap, izip, ifilter
717
Python
.py
27
22.222222
64
0.638235
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,988
_misc.pyx
stan-dev_pystan2/pystan/_misc.pyx
# cython: language_level=2 import cython import numpy as np @cython.boundscheck(False) @cython.wraparound(False) cpdef get_kept_samples(int n, dict sim): """See documentation in misc.py""" cdef int i, j, num_chains, num_samples, num_warmup, ss_index, s_index cdef double[:] s, ss cdef long[:] perm num_chains = sim['chains'] nth_key = list(sim['samples'][0]['chains'].keys())[n] # the following assumes each chain has same length, same warmup num_samples = sim['samples'][0]['chains'][nth_key].shape[0] num_warmup = sim['warmup2'][0] ss = np.empty((num_samples - num_warmup) * num_chains) for i in range(num_chains): perm = sim['permutation'][i] s = sim['samples'][i]['chains'][nth_key] for j in range(num_samples - num_warmup): ss_index = i * (num_samples - num_warmup) + j s_index = num_warmup + perm[j] ss[ss_index] = s[s_index] return ss @cython.boundscheck(False) @cython.wraparound(False) cpdef get_samples(int n, dict sim, inc_warmup): """See documentation in misc.py""" cdef int i cdef double[:] s cdef int num_chains = sim['chains'] if sim['warmup'] == 0: inc_warmup = True # the following assumes each chain has same length, same warmup cdef int num_warmup = sim['warmup2'][0] nth_key = list(sim['samples'][0]['chains'].keys())[n] ss = [] for i in range(num_chains): s = sim['samples'][i]['chains'][nth_key] if inc_warmup: ss.append(s) else: ss.append(s[num_warmup:]) return ss
1,606
Python
.py
44
30.545455
73
0.617268
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,989
_api.pyx
stan-dev_pystan2/pystan/_api.pyx
# distutils: language = c++ # cython: language_level=2 #----------------------------------------------------------------------------- # Copyright (c) 2013, Allen B. Riddell # # This file is licensed under Version 3.0 of the GNU General Public # License. See LICENSE for a text of the license. #----------------------------------------------------------------------------- from libcpp cimport bool from pystan.stanc cimport PyStancResult, stanc as c_stanc from libcpp cimport bool from libcpp.string cimport string from libcpp.vector cimport vector def stanc(bytes model_stancode, bytes model_name, bool allow_undefined, bytes filename, vector[string] include_paths): cdef PyStancResult result c_stanc(model_stancode, model_name, allow_undefined, filename, include_paths, result) result_include_paths = [] for include_path in result.include_paths: result_include_paths.append(include_path.decode('utf-8')) return {'status': result.status, 'msg': result.msg.decode('utf-8'), 'model_cppname': result.model_cppname.decode('ascii'), 'cppcode': result.cppcode.decode('ascii'), 'include_paths' : result_include_paths, }
1,234
Python
.py
28
38.75
78
0.609129
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,990
chains.py
stan-dev_pystan2/pystan/chains.py
import pystan._chains as _chains from numpy import nan def ess(sim, n): """Calculate effective sample size Parameters ---------- sim : chains n : int Parameter index starting from 0 """ try: ess = _chains.effective_sample_size(sim, n) except (ValueError, ZeroDivisionError): ess = nan return ess def splitrhat(sim, n): """Calculate rhat Parameters ---------- sim : chains n : int Parameter index starting from 0 """ try: rhat = _chains.split_potential_scale_reduction(sim, n) except (ValueError, ZeroDivisionError): rhat = nan return rhat def ess_and_splitrhat(sim, n): """Calculate ess and rhat This saves time by creating just one stan::mcmc::chains instance. """ # FIXME: does not yet save time return (ess(sim, n), splitrhat(sim, n))
885
Python
.py
34
20.705882
69
0.629147
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,991
stanfit4model.pyx
stan-dev_pystan2/pystan/stanfit4model.pyx
# distutils: language = c++ # cython: language_level=2 #----------------------------------------------------------------------------- # Copyright (c) 2013, Allen B. Riddell # # This file is licensed under Version 3.0 of the GNU General Public # License. See LICENSE for a text of the license. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # THIS IS A TEMPLATE, not a proper Cython .pyx file # # A template variable $model_cppname needs to be substituted before # compilation. # # $model_cppname.hpp should be in the same directory as this # file (after substitutions have been made). # #----------------------------------------------------------------------------- # cython imports from libcpp cimport bool from libcpp.map cimport map from libcpp.pair cimport pair from libcpp.string cimport string from libcpp.vector cimport vector from cython.operator cimport dereference as deref, preincrement as inc cimport numpy as np from pystan.io cimport py_var_context, var_context from pystan.stan_fit cimport stan_fit, StanArgs, StanHolder, get_all_flatnames # Initialize numpy for use from C. When using numpy from C or Cython this must always be done. 
np.import_array() # python imports from collections import OrderedDict import logging import warnings import numpy as np import pystan.misc import pystan.plots from pystan._compat import PY2, string_types from pystan.constants import (sampling_algo_t, optim_algo_t, variational_algo_t, sampling_metric_t, stan_args_method_t) logger = logging.getLogger('pystan') cdef extern from "boost/random/additive_combine.hpp" namespace "boost::random": cdef cppclass additive_combine_engine[T, U]: pass ctypedef additive_combine_engine ecuyer1988 cdef extern from "$model_cppname.hpp" namespace "${model_cppname}_namespace": cdef cppclass $model_cppname: $model_cppname(var_context& context) except + # NOTE: Methods that aren't intended for public use are prefixed by '_'. For # example, _update_param_oi probably shouldn't be called unless you know # something about the state of the C++ class instance wrapped by the class. ctypedef map[string, pair[vector[double], vector[size_t]]] vars_r_t ctypedef map[string, pair[vector[int], vector[size_t]]] vars_i_t cdef class PyStanHolder: """Allow access to a StanHolder instance from Python A PyStanHolder instance wraps a StanHolder instance. When the PyStanHolder instance is deleted, the StanHolder instance will be as well. There are slight differences between the StanHolder and PyStanHolder. For example, chains is an OrderedDict in the PyStanHolder where a StanHolder tracks the same information in the fields ``chains`` and ``chain_names``. The same holds for ``sampler_params``. 
""" cdef public int num_failed cdef public bool test_grad cdef public list inits cdef public list par cdef public double value cdef public chains cdef public dict args cdef public mean_pars cdef public list mean_par_names cdef public double mean_lp__ cdef public adaptation_info cdef public sampler_params cdef public list sampler_param_names cdef StanHolder * holderptr # for backward compatibility allow holder[attr] def __getitem__(self, key): return getattr(self, key) def __dealloc__(self): del self.holderptr # the following three methods give Cython classes instructions for pickling def __getstate__(self): attr_names = ('num_failed test_grad inits par value chains args mean_pars mean_lp__ ' 'adaptation_info sampler_params sampler_param_names').split() state = dict((k, getattr(self, k)) for k in attr_names) return state def __setstate__(self, state): for k in state: setattr(self, k, state[k]) def __reduce__(self): return (PyStanHolder, tuple(), self.__getstate__(), None, None) cdef PyStanHolder _pystanholder_from_stanholder(StanHolder* holder): cdef int num_iter cdef double* data_ptr cdef np.npy_intp dims[1] h = PyStanHolder() h.holderptr = holder h.num_failed = holder.num_failed h.test_grad = holder.test_grad h.inits = holder.inits h.par = holder.par h.value = holder.value chains = [] cdef vector[vector[double] ].iterator it = holder.chains.begin() while it != holder.chains.end(): num_iter = deref(it).size() dims[0] = <np.npy_intp> num_iter data_ptr = &(deref(it).front()) ch = np.PyArray_SimpleNewFromData(1, dims, np.NPY_DOUBLE, data_ptr) chains.append(ch) inc(it) chain_names = [n.decode('utf-8') for n in holder.chain_names] h.chains = OrderedDict(zip(chain_names, chains)) # NOTE: when _pystanholder_from_stanholder is called we also have a pointer # to holder.args available so we will use it directly from there. 
Strictly # speaking it should be done here, but Cython kept throwing errors # FIXME: figure out origins of difficulties # r['args'] = _dict_from_stanargs(holder.args) h.mean_pars = holder.mean_pars h.mean_par_names = [n.decode('utf-8') for n in holder.mean_par_names] h.mean_lp__ = holder.mean_lp__ h.adaptation_info = holder.adaptation_info.decode('utf-8') h.sampler_params = holder.sampler_params h.sampler_param_names = [n.decode('utf-8') for n in holder.sampler_param_names] return h cdef dict _dict_from_stanargs(StanArgs* args): d = dict() ctrl_d = dict() d['random_seed'] = str(args.random_seed) d['chain_id'] = args.chain_id d['init'] = args.init # FIXME: reconstructing d['init_list'] from args.init_vars_r and # args.init_vars_i requires additional work. The initial values for each # chain are accessible with the method get_inits() d['init_radius'] = args.init_radius d['enable_random_init'] = args.enable_random_init d['append_samples'] = args.append_samples if args.sample_file_flag: d['sample_file'] = args.sample_file if args.diagnostic_file_flag: d['diagnostic_file'] = args.diagnostic_file method = stan_args_method_t(args.method) if method == stan_args_method_t.SAMPLING: d["method"] = method.name d["iter"] = args.ctrl.sampling.iter d["warmup"] = args.ctrl.sampling.warmup d["thin"] = args.ctrl.sampling.thin d["refresh"] = args.ctrl.sampling.refresh d["test_grad"] = False ctrl_d["adapt_engaged"] = args.ctrl.sampling.adapt_engaged ctrl_d["adapt_gamma"] = args.ctrl.sampling.adapt_gamma ctrl_d["adapt_delta"] = args.ctrl.sampling.adapt_delta ctrl_d["adapt_kappa"] = args.ctrl.sampling.adapt_kappa ctrl_d["adapt_init_buffer"] = args.ctrl.sampling.adapt_init_buffer ctrl_d["adapt_term_buffer"] = args.ctrl.sampling.adapt_term_buffer ctrl_d["adapt_window"] = args.ctrl.sampling.adapt_window ctrl_d["adapt_t0"] = args.ctrl.sampling.adapt_t0 ctrl_d["stepsize"] = args.ctrl.sampling.stepsize ctrl_d["stepsize_jitter"] = args.ctrl.sampling.stepsize_jitter d["sampler_t"] = algorithm = 
sampling_algo_t(args.ctrl.sampling.algorithm).name if algorithm == sampling_algo_t.NUTS: ctrl_d["max_treedepth"] = args.ctrl.sampling.max_treedepth elif algorithm == sampling_algo_t.HMC: ctrl_d["int_time"] = args.ctrl.sampling.int_time elif algorithm == sampling_algo_t.Metropolis: pass else: # included here to mirror rstan code pass if algorithm != sampling_algo_t.Metropolis: metric = sampling_metric_t(args.ctrl.sampling.metric).name if metric == sampling_metric_t.UNIT_E: ctrl_d["metric"] = "unit_e" d["sampler_t"] = d["sampler_t"] + "(unit_e)" elif metric == sampling_metric_t.DIAG_E: ctrl_d["metric"] = "diag_e" d["sampler_t"] = d["sampler_t"] + "(diag_e)" elif metric == sampling_metric_t.DENSE_E: ctrl_d["metric"] = "dense_e" d["sampler_t"] = d["sampler_t"] + "(dense_e)" d["control"] = ctrl_d elif method == stan_args_method_t.VARIATIONAL: d["method"] = method.name d["iter"] = args.ctrl.variational.iter d["grad_samples"] = args.ctrl.variational.grad_samples d["elbo_samples"] = args.ctrl.variational.elbo_samples d["eval_elbo"] = args.ctrl.variational.eval_elbo d["output_samples"] = args.ctrl.variational.output_samples d["eta"] = args.ctrl.variational.eta d["adapt_engaged"] = args.ctrl.variational.adapt_engaged d["adapt_iter"] = args.ctrl.variational.adapt_iter d["tol_rel_obj"] = args.ctrl.variational.tol_rel_obj algorithm = variational_algo_t(args.ctrl.variational.algorithm) d['algorithm'] = algorithm.name elif method == stan_args_method_t.OPTIM: d["method"] = method.name d["iter"] = args.ctrl.optim.iter d["refresh"] = args.ctrl.optim.refresh d["save_iterations"] = args.ctrl.optim.save_iterations algorithm = optim_algo_t(args.ctrl.optim.algorithm) d["algorithm"] = algorithm.name if algorithm == optim_algo_t.Newton: pass elif algorithm == optim_algo_t.LBFGS: d["init_alpha"] = args.ctrl.optim.init_alpha d["tol_param"] = args.ctrl.optim.tol_param d["tol_obj"] = args.ctrl.optim.tol_obj d["tol_grad"] = args.ctrl.optim.tol_grad d["tol_rel_obj"] = args.ctrl.optim.tol_obj 
d["tol_rel_grad"] = args.ctrl.optim.tol_grad d["tol_history_size"] = args.ctrl.optim.tol_grad elif algorithm == optim_algo_t.BFGS: d["init_alpha"] = args.ctrl.optim.init_alpha d["tol_param"] = args.ctrl.optim.tol_param d["tol_obj"] = args.ctrl.optim.tol_obj d["tol_grad"] = args.ctrl.optim.tol_grad d["tol_rel_obj"] = args.ctrl.optim.tol_obj d["tol_rel_grad"] = args.ctrl.optim.tol_grad elif method == stan_args_method_t.TEST_GRADIENT: d["method"] = "test_grad" d["test_grad"] = True ctrl_d["epsilon"] = args.ctrl.test_grad.epsilon ctrl_d["error"] = args.ctrl.test_grad.error d["control"] = ctrl_d return d cdef void _set_stanargs_from_dict(StanArgs* p, dict args): """Insert values in dictionary `args` into `p`""" # _call_sampler requires a specially crafted dictionary of arguments # intended for the c++ function sampler_command(...) in stan_fit.hpp # If the dictionary doesn't contain the correct keys (arguments), # the function will raise a KeyError exception (as it should!). cdef vars_r_t init_vars_r cdef vars_i_t init_vars_i p.random_seed = <unsigned int> args.get('random_seed', 0) p.chain_id = <unsigned int> args['chain_id'] p.init = args['init'] if args['init'] == b'user': init_r, init_i = pystan.misc._split_data(args['init_list']) init_vars_r = _dict_to_vars_r(init_r) init_vars_i = _dict_to_vars_i(init_i) p.init_vars_r = init_vars_r p.init_vars_i = init_vars_i p.init_radius = args['init_radius'] p.sample_file = args['sample_file'] p.append_samples = args['append_samples'] p.sample_file_flag = args['sample_file_flag'] p.method = args['method'].value p.diagnostic_file = args['diagnostic_file'] p.diagnostic_file_flag = args['diagnostic_file_flag'] p.metric_file = args['metric_file'] p.metric_file_flag = args['metric_file_flag'] if args['method'] == stan_args_method_t.SAMPLING: p.ctrl.sampling.iter = args['ctrl']['sampling']['iter'] p.ctrl.sampling.refresh = args['ctrl']['sampling']['refresh'] p.ctrl.sampling.algorithm = args['ctrl']['sampling']['algorithm'].value 
p.ctrl.sampling.warmup = args['ctrl']['sampling']['warmup'] p.ctrl.sampling.thin = args['ctrl']['sampling']['thin'] p.ctrl.sampling.save_warmup = args['ctrl']['sampling']['save_warmup'] p.ctrl.sampling.iter_save = args['ctrl']['sampling']['iter_save'] p.ctrl.sampling.iter_save_wo_warmup = args['ctrl']['sampling']['iter_save_wo_warmup'] p.ctrl.sampling.adapt_engaged = args['ctrl']['sampling']['adapt_engaged'] p.ctrl.sampling.adapt_gamma = args['ctrl']['sampling']['adapt_gamma'] p.ctrl.sampling.adapt_delta = args['ctrl']['sampling']['adapt_delta'] p.ctrl.sampling.adapt_kappa = args['ctrl']['sampling']['adapt_kappa'] p.ctrl.sampling.adapt_init_buffer = args['ctrl']['sampling']['adapt_init_buffer'] p.ctrl.sampling.adapt_term_buffer = args['ctrl']['sampling']['adapt_term_buffer'] p.ctrl.sampling.adapt_window = args['ctrl']['sampling']['adapt_window'] p.ctrl.sampling.adapt_t0 = args['ctrl']['sampling']['adapt_t0'] p.ctrl.sampling.metric = args['ctrl']['sampling']['metric'].value p.ctrl.sampling.stepsize = args['ctrl']['sampling']['stepsize'] p.ctrl.sampling.stepsize_jitter = args['ctrl']['sampling']['stepsize_jitter'] if args['ctrl']['sampling']['algorithm'] == sampling_algo_t.NUTS: p.ctrl.sampling.max_treedepth = args['ctrl']['sampling']['max_treedepth'] elif args['ctrl']['sampling']['algorithm'] == sampling_algo_t.HMC: p.ctrl.sampling.int_time = args['ctrl']['sampling']['int_time'] elif args['method'] == stan_args_method_t.OPTIM: p.ctrl.optim.iter = args['ctrl']['optim']['iter'] p.ctrl.optim.refresh = args['ctrl']['optim']['refresh'] p.ctrl.optim.algorithm = args['ctrl']['optim']['algorithm'].value p.ctrl.optim.save_iterations = args['ctrl']['optim']['save_iterations'] p.ctrl.optim.init_alpha = args['ctrl']['optim']['init_alpha'] p.ctrl.optim.tol_obj = args['ctrl']['optim']['tol_obj'] p.ctrl.optim.tol_grad = args['ctrl']['optim']['tol_grad'] p.ctrl.optim.tol_param = args['ctrl']['optim']['tol_param'] p.ctrl.optim.tol_rel_obj = args['ctrl']['optim']['tol_rel_obj'] 
p.ctrl.optim.tol_rel_grad = args['ctrl']['optim']['tol_rel_grad'] p.ctrl.optim.history_size = args['ctrl']['optim']['history_size'] elif args['method'] == stan_args_method_t.TEST_GRADIENT: p.ctrl.test_grad.epsilon = args['ctrl']['test_grad']['epsilon'] p.ctrl.test_grad.error = args['ctrl']['test_grad']['error'] elif args['method'] == stan_args_method_t.VARIATIONAL: p.ctrl.variational.algorithm = args['ctrl']['variational']['algorithm'].value p.ctrl.variational.iter = args['ctrl']['variational']['iter'] p.ctrl.variational.grad_samples = args['ctrl']['variational']['grad_samples'] p.ctrl.variational.elbo_samples = args['ctrl']['variational']['elbo_samples'] p.ctrl.variational.eval_elbo = args['ctrl']['variational']['eval_elbo'] p.ctrl.variational.output_samples = args['ctrl']['variational']['output_samples'] p.ctrl.variational.eta = args['ctrl']['variational']['eta'] p.ctrl.variational.adapt_engaged = args['ctrl']['variational']['adapt_engaged'] p.ctrl.variational.tol_rel_obj = args['ctrl']['variational']['tol_rel_obj'] p.ctrl.variational.adapt_iter = args['ctrl']['variational']['adapt_iter'] cdef vars_r_t _dict_to_vars_r(data_r): """Converts a dict or OrderedDict to a C++ map of string, double pairs""" cdef vars_r_t vars_r # The dimension for a single value is an empty vector. A list of # values is indicated by an entry with the number of values. # The dimensions of an array are indicated as one would expect. # # note, array.flat yields values in C-contiguous style, with the # last index varying the fastest. So the transpose is taken # so that the ordering matches that used by stan. for key in data_r: assert isinstance(key, bytes), "Variable name must be bytes." val = (data_r[key].T.flat, data_r[key].shape) vars_r[key] = val return vars_r cdef vars_i_t _dict_to_vars_i(data_i): """Converts a dict or OrdereDict to a C++ map of string, int pairs""" cdef vars_i_t vars_i # The dimension for a single value is an empty vector. 
A list of # values is indicated by an entry with the number of values. # The dimensions of an array are indicated as one would expect. # # note, array.flat yields values in C-contiguous style, with the # last index varying the fastest. So the transpose is taken # so that the ordering matches that used by stan. for key in data_i: assert isinstance(key, bytes), "Variable name must be bytes." val = (data_i[key].T.flat, data_i[key].shape) vars_i[key] = val return vars_i def _call_sampler_star(data_args): return _call_sampler(*data_args) def _call_sampler(data, args, pars_oi=None): """Wrapper for call_sampler in stan_fit This function is self-contained and suitable for parallel invocation. """ data_r, data_i = pystan.misc._split_data(data) cdef StanHolder *holderptr = new StanHolder() cdef StanArgs *argsptr = new StanArgs() if not holderptr: raise MemoryError("Couldn't allocate space for StanHolder.") if not argsptr: raise MemoryError("Couldn't allocate space for StanArgs.") chain_id = args['chain_id'] for handler in logger.handlers: handler.flush() _set_stanargs_from_dict(argsptr, args) cdef stan_fit[$model_cppname, ecuyer1988] *fitptr cdef vars_r_t vars_r = _dict_to_vars_r(data_r) cdef vars_i_t vars_i = _dict_to_vars_i(data_i) fitptr = new stan_fit[$model_cppname, ecuyer1988](vars_r, vars_i, argsptr.random_seed) if not fitptr: raise MemoryError("Couldn't allocate space for stan_fit.") # Implementation note: there is an extra stan_fit instance associated # with the model (which enables access to some methods). This is a # horrible, confusing idea which will hopefully be fixed in Stan 3. if pars_oi is not None: pars_oi_bytes = [n.encode('ascii') for n in pars_oi] if len(pars_oi_bytes) != fitptr.param_names_oi().size(): fitptr.update_param_oi(pars_oi_bytes) ret = fitptr.call_sampler(deref(argsptr), deref(holderptr)) holder = _pystanholder_from_stanholder(holderptr) # FIXME: rather than fetching the args from the holderptr, we just use # the argsptr we passed directly. 
This is a hack to solve a problem # that holder.args gets dropped somewhere in C++. holder.args = _dict_from_stanargs(argsptr) del argsptr del fitptr return ret, holder def _split_pars_locs(fnames, pars): """Split flatnames to par and location""" par_keys = OrderedDict((par, []) for par in pars) for key in fnames: par_tail = key.split("[") par = par_tail[0] loc = [Ellipsis] for tail in par_tail[1:]: loc = [] for i in tail[:-1].split(","): loc.append(int(i)-1) par_keys[par].append((key, loc)) return par_keys cdef class StanFit4Model: """Holder for results obtained from running a Stan model with data Attributes ---------- sim : dict Holder for runs. Stores samples in sim['samples'] data : dict Data used to fit model. Note ---- The only unexpected difference between PyStan and RStan is this: where RStan stores samples for a parameter directly in, say, fit@sim$samples[[1]]$theta, in PyStan they are in fit.sim['samples'][0]['chains']['theta']. The difference is due to Python lacking a dictionary structure that can also have attributes. 
""" cdef stan_fit[$model_cppname, ecuyer1988] *thisptr # attributes populated by methods of StanModel cdef public data # dict or OrderedDict cdef public random_seed cdef public dict sim cdef public model_name cdef public model_pars cdef public par_dims cdef public mode cdef public inits cdef public stan_args cdef public stanmodel cdef public date cdef public _repr_pars cdef public _repr_num def __cinit__(self, *args): # __cinit__ must be callable with no arguments for unpickling cdef vars_r_t vars_r cdef vars_i_t vars_i if len(args) == 2: data, random_seed = args data_r, data_i = pystan.misc._split_data(data) # NB: dictionary keys must be byte strings vars_r = _dict_to_vars_r(data_r) vars_i = _dict_to_vars_i(data_i) # TODO: the random seed needs to be known by StanFit4Model self.thisptr = new stan_fit[$model_cppname, ecuyer1988](vars_r, vars_i, <unsigned int> random_seed) if not self.thisptr: raise MemoryError("Couldn't allocate space for stan_fit.") def __init__(self, data, random_seed): self.data = data self.random_seed = random_seed self._set_repr_pars(None) self._set_repr_num (100) def __dealloc__(self): del self.thisptr # the following three methods give Cython classes instructions for pickling def __getstate__(self): attr_names = ('data sim model_name model_pars par_dims mode inits stan_args ' 'stanmodel date').split() state = dict((k, getattr(self, k)) for k in attr_names) return state def __setstate__(self, state): for k in state: setattr(self, k, state[k]) def __reduce__(self): msg = ("Pickling fit objects is an experimental feature!\n" "The relevant StanModel instance must be pickled along with this fit object.\n" "When unpickling the StanModel must be unpickled first.") warnings.warn(msg) return (StanFit4Model, (self.data, self.random_seed), self.__getstate__(), None, None) # public methods def plot(self, pars=None, dtypes=None): """Visualize samples from posterior distributions Parameters --------- pars : {str, sequence of str} parameter name(s); by 
default use all parameters of interest dtypes : dict datatype of parameter(s). If nothing is passed, np.float will be used for all parameters. If np.int is specified, the histogram will be visualized, not but kde. Note ---- This is currently an alias for the `traceplot` method. """ if pars is None: pars = [par for par in self.sim['pars_oi'] if par != 'lp__'] elif isinstance(pars, string_types): pars = [pars] pars = pystan.misc._remove_empty_pars(pars, self.sim['pars_oi'], self.sim['dims_oi']) return pystan.plots.traceplot(self, pars, dtypes) def traceplot(self, pars=None, dtypes=None): """Visualize samples from posterior distributions Parameters --------- pars : {str, sequence of str}, optional parameter name(s); by default use all parameters of interest dtypes : dict datatype of parameter(s). If nothing is passed, np.float will be used for all parameters. If np.int is specified, the histogram will be visualized, not but kde. """ # FIXME: for now plot and traceplot do the same thing return self.plot(pars, dtypes=dtypes) def extract(self, pars=None, permuted=True, inc_warmup=False, dtypes=None): """Extract samples in different forms for different parameters. Parameters ---------- pars : {str, sequence of str} parameter (or quantile) name(s). permuted : bool If True, returned samples are permuted. All chains are merged and warmup samples are discarded. inc_warmup : bool If True, warmup samples are kept; otherwise they are discarded. If `permuted` is True, `inc_warmup` is ignored. dtypes : dict datatype of parameter(s). If nothing is passed, np.float will be used for all parameters. Returns ------- samples : dict or array If `permuted` is True, return dictionary with samples for each parameter (or other quantity) named in `pars`. If `permuted` is False and `pars` is None, an array is returned. The first dimension of the array is for the iterations; the second for the number of chains; the third for the parameters. 
Vectors and arrays are expanded to one parameter (a scalar) per cell, with names indicating the third dimension. Parameters are listed in the same order as `model_pars` and `flatnames`. If `permuted` is False and `pars` is not None, return dictionary with samples for each parameter (or other quantity) named in `pars`. The first dimension of the sample array is for the iterations; the second for the number of chains; the rest for the parameters. Parameters are listed in the same order as `pars`. """ self._verify_has_samples() if inc_warmup is True and permuted is True: logger.warning("`inc_warmup` ignored when `permuted` is True.") inc_warmup = False if dtypes is not None and permuted is False and pars is None: logger.warning("`dtypes` ignored when `permuted` is False and `pars` is None") pars_original = pars if pars is None: pars = self.sim['pars_oi'] elif isinstance(pars, string_types): pars = [pars] pars = pystan.misc._remove_empty_pars(pars, self.sim['pars_oi'], self.sim['dims_oi']) if dtypes is None: dtypes = {} allpars = self.sim['pars_oi'] + self.sim['fnames_oi'] pystan.misc._check_pars(allpars, pars) n_kept = [s if inc_warmup else s-w for s, w in zip(self.sim['n_save'], self.sim['warmup2'])] chains = len(self.sim['samples']) # return array (n, chains, flat_pars) if (not permuted) and (pars_original is None): n = n_kept[0] arr_shape = [n, chains, len(self.sim['fnames_oi'])] arr = np.empty(arr_shape, order='F') for chain, (pyholder, n) in enumerate(zip(self.sim['samples'], n_kept)): for i, item in enumerate(pyholder.chains.values()): arr[:, chain, i] = item[-n:] return arr par_keys = _split_pars_locs(self.sim['fnames_oi'], self.sim['pars_oi']) shapes = dict(zip(self.sim['pars_oi'], self.sim['dims_oi'])) extracted = OrderedDict() for par in pars: if par in extracted: continue keys_locs = par_keys.get(par, [(par, [Ellipsis])]) shape = shapes.get(par, []) dtype = dtypes.get(par) if permuted: arr_shape = [sum(n_kept)] + shape arr = np.empty(arr_shape, dtype=dtype, 
order='F') for chain, (pyholder, permutation, n) in enumerate(zip(self.sim['samples'], self.sim['permutation'], n_kept)): n_processed = sum(n_kept[:chain]) axes = [slice(n_processed, n_processed+n)] for key, loc in keys_locs: arr_slice = tuple(axes + loc) arr[arr_slice] = pyholder.chains[key][-n:][permutation] extracted[par] = arr else: n = n_kept[0] arr_shape = [n, chains] + shape arr = np.empty(arr_shape, dtype=dtype, order='F') for chain, (pyholder, n) in enumerate(zip(self.sim['samples'], n_kept)): axes = [slice(None), chain] for key, loc in keys_locs: arr_slice = tuple(axes + loc) arr[arr_slice] = pyholder.chains[key][-n:] extracted[par] = arr return extracted def __unicode__(self): # for Python 2.x return pystan.misc.stansummary(self) def __str__(self): s = pystan.misc.stansummary(self) return s.encode('utf-8') if PY2 else s def _get_repr_num(self): return self._repr_num def _get_repr_pars(self): update = False if self._repr_pars is not None: update = len(self._repr_pars)-1 != self._repr_num if self._repr_pars is not None and not update: return self._repr_pars elif len(self.sim["fnames_oi"]) > self._repr_num: logger.warning("Truncated summary with the 'fit.__repr__' method. For the full summary use 'print(fit)'") self._set_repr_pars(self.sim["fnames_oi"][:self._repr_num-1] + ['lp__']) return self._repr_pars def _set_repr_num(self, n): self._repr_num = n def _set_repr_pars(self, pars): self._repr_pars = pars def __repr__(self): pars = self._get_repr_pars() if pars is not None and len(self.sim["fnames_oi"]) > len(pars): s = "\nWarning: Shown data is truncated to {} parameters".format(len(pars)) s += "\nFor the full summary use 'print(fit)'\n\n" s += pystan.misc.stansummary(self, pars=pars) else: s = pystan.misc.stansummary(self) return s.encode('utf-8') if PY2 else s def __getitem__(self, key): extr = self.extract(pars=(key,)) return extr[key] def stansummary(self, pars=None, probs=(0.025, 0.25, 0.5, 0.75, 0.975), digits_summary=2): """ Summary statistic table. 
Parameters ---------- pars : str or sequence of str, optional Parameter names. By default use all parameters probs : sequence of float, optional Quantiles. By default, (0.025, 0.25, 0.5, 0.75, 0.975) digits_summary : int, optional Number of significant digits. By default, 2 Returns ------- summary : string Table includes mean, se_mean, sd, probs_0, ..., probs_n, n_eff and Rhat. Examples -------- >>> model_code = 'parameters {real y;} model {y ~ normal(0,1);}' >>> m = StanModel(model_code=model_code, model_name="example_model") >>> fit = m.sampling() >>> print(fit.stansummary()) Inference for Stan model: example_model. 4 chains, each with iter=2000; warmup=1000; thin=1; post-warmup draws per chain=1000, total post-warmup draws=4000. mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat y 0.01 0.03 1.0 -2.01 -0.68 0.02 0.72 1.97 1330 1.0 lp__ -0.5 0.02 0.68 -2.44 -0.66 -0.24 -0.05-5.5e-4 1555 1.0 Samples were drawn using NUTS at Thu Aug 17 00:52:25 2017. For each parameter, n_eff is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat=1). """ return pystan.misc.stansummary(fit=self, pars=pars, probs=probs, digits_summary=digits_summary) def summary(self, pars=None, probs=None): """Summarize samples (compute mean, SD, quantiles) in all chains. REF: stanfit-class.R summary method Parameters ---------- pars : str or sequence of str, optional Parameter names. By default use all parameters probs : sequence of float, optional Quantiles. By default, (0.025, 0.25, 0.5, 0.75, 0.975) Returns ------- summaries : OrderedDict of array Array indexed by 'summary' has dimensions (num_params, num_statistics). Parameters are unraveled in *row-major order*. Statistics include: mean, se_mean, sd, probs_0, ..., probs_n, n_eff, and Rhat. Array indexed by 'c_summary' breaks down the statistics by chain and has dimensions (num_params, num_statistics_c_summary, num_chains). 
Statistics for `c_summary` are the same as for `summary` with the exception that se_mean, n_eff, and Rhat are absent. Row names and column names are also included in the OrderedDict. """ return pystan.misc._summary(self, pars, probs) def log_prob(self, upar, adjust_transform=True, gradient=False): """ Expose the log_prob of the model to stan_fit so user can call this function. Parameters ---------- upar : array The real parameters on the unconstrained space. adjust_transform : bool Whether we add the term due to the transform from constrained space to unconstrained space implicitly done in Stan. Note ---- In Stan, the parameters need be defined with their supports. For example, for a variance parameter, we must define it on the positive real line. But inside Stan's sampler, all parameters defined on the constrained space are transformed to unconstrained space, so the log density function need be adjusted (i.e., adding the log of the absolute value of the Jacobian determinant). With the transformation, Stan's samplers work on the unconstrained space and once a new iteration is drawn, Stan transforms the parameters back to their supports. All the transformation are done inside Stan without interference from the users. However, when using the log density function for a model exposed to Python, we need to be careful. For example, if we are interested in finding the mode of parameters on the constrained space, we then do not need the adjustment. For this reason, there is an argument named `adjust_transform` for functions `log_prob` and `grad_log_prob`. """ # gradient is ignored for now. Call grad_log_prob to get the gradient. cdef vector[double] par_r = np.asarray(upar).flat return self.thisptr.log_prob(par_r, adjust_transform, gradient) def grad_log_prob(self, upar, adjust_transform=True): """ Expose the grad_log_prob of the model to stan_fit so user can call this function. Parameters ---------- upar : array The real parameters on the unconstrained space. 
adjust_transform : bool Whether we add the term due to the transform from constrained space to unconstrained space implicitly done in Stan. """ cdef vector[double] par_r, grad par_r = np.asarray(upar).flat grad = self.thisptr.grad_log_prob(par_r, adjust_transform) return np.asarray(grad) def get_adaptation_info(self): """Obtain adaptation information for sampler, which now only NUTS2 has. The results are returned as a list, each element of which is a character string for a chain.""" self._verify_has_samples() lai = [ch['adaptation_info'] for ch in self.sim['samples']] return lai def get_logposterior(self, inc_warmup=True): """Get the log-posterior (up to an additive constant) for all chains. Each element of the returned array is the log-posterior for a chain. Optional parameter `inc_warmup` indicates whether to include the warmup period. """ self._verify_has_samples() llp = [ch['chains']['lp__'] for ch in self.sim['samples']] return llp if inc_warmup else [x[warmup2:] for x, warmup2 in zip(llp, self.sim['warmup2'])] def get_sampler_params(self, inc_warmup=True): """Obtain the parameters used for the sampler such as `stepsize` and `treedepth`. The results are returned as a list, each element of which is an OrderedDict a chain. The dictionary has number of elements corresponding to the number of parameters used in the sampler. Optional parameter `inc_warmup` indicates whether to include the warmup period. """ self._verify_has_samples() ldf = [OrderedDict(zip(ch['sampler_param_names'], np.array(ch['sampler_params']))) for ch in self.sim['samples']] if inc_warmup: return ldf else: for d, warmup2 in zip(ldf, self.sim['warmup2']): for key in d: d[key] = d[key][warmup2:] return ldf def get_posterior_mean(self): """Get the posterior mean for all parameters Returns ------- means : array of shape (num_parameters, num_chains) Order of parameters is given by self.model_pars or self.flatnames if parameters of interest include non-scalar parameters. 
An additional column for mean lp__ is also included. """ self._verify_has_samples() fnames = self.flatnames mean_pars = np.array([ch['mean_pars'] for ch in self.sim['samples']]) mean_lp__ = np.array([ch['mean_lp__'] for ch in self.sim['samples']]) mean_pars = np.column_stack(mean_pars) assert len(fnames) == len(mean_pars) m = np.row_stack([mean_pars, mean_lp__]) return m def constrain_pars(self, np.ndarray[double, ndim=1, mode="c"] upar not None): """Transform parameters from unconstrained space to defined support""" cdef vector[double] constrained constrained = self.thisptr.constrain_pars(upar) return np.asarray(constrained) def unconstrain_pars(self, par): """Transform parameters from defined support to unconstrained space""" cdef vector[double] unconstrained data_r, data_i = pystan.misc._split_data(par) cdef vars_r_t vars_r = _dict_to_vars_r(data_r) cdef vars_i_t vars_i = _dict_to_vars_i(data_i) unconstrained = self.thisptr.unconstrain_pars(vars_r, vars_i) return np.asarray(unconstrained) def get_seed(self): return self.stan_args[0]['seed'] def get_inits(self): return self.inits def get_stancode(self): return self.stanmodel.model_code def get_stanmodel(self): return self.stanmodel def get_stepsize(self): """Parse stepsize from fit object Parameters ---------- fit : StanFit4Model Returns ------- list Returns an empty list if step sizes are not found in ``fit.get_adaptation_info``. """ return pystan.misc.get_stepsize(fit=self) def get_inv_metric(self, as_dict=False): """Parse inverse metrics from the fit object Parameters ---------- as_dict : bool, optional Returns ------- list Returns an empty list if inverse metric is not found in ``fit.get_adaptation_info()``. """ return pystan.misc.get_inv_metric(fit=self, as_dict=as_dict) def get_last_position(self, warmup=False): """Parse last position from fit object Parameters ---------- warmup : bool If True, returns the last warmup position, when warmup has been done. Otherwise function returns the first sample position. 
Returns ------- list list contains a dictionary of last draw from each chain. """ return pystan.misc.get_last_position(fit=self, warmup=warmup) def to_dataframe(self, pars=None, permuted=False, dtypes=None, inc_warmup=False, diagnostics=True, header=True): """Extract samples as a pandas dataframe for different parameters. Parameters ---------- pars : {str, sequence of str} parameter (or quantile) name(s). permuted : bool, default False If True, returned samples are permuted. If inc_warmup is True, warmup samples have negative order. dtypes : dict datatype of parameter(s). If nothing is passed, float will be used for all parameters. inc_warmup : bool If True, warmup samples are kept; otherwise they are discarded. diagnostics : bool If True, include hmc diagnostics in dataframe. header : bool If True, include header columns. Returns ------- df : pandas dataframe Returned dataframe contains: [header_df]|[draws_df]|[diagnostics_df], where all groups are optional. To exclude draws_df use `pars=[]`. Note ---- Unlike default in extract (`permuted=True`) `.to_dataframe` method returns non-permuted samples (`permuted=False`) with diagnostics params included. """ return pystan.misc.to_dataframe(fit=self, pars=pars, permuted=permuted, dtypes=dtypes, inc_warmup=inc_warmup, diagnostics=diagnostics, header=header) # FIXME: when this is a normal Python class one can use @property instead # of this special Cython syntax. property flatnames: def __get__(self): # NOTE: RStan rewrites the C++ function get_all_flatnames in R (in misc.R). # PyStan exposes and calls the C++ function directly. cdef vector[string] fnames names = [n.encode('ascii') for n in self.model_pars] get_all_flatnames(names, self.par_dims, fnames, col_major=True) return [n.decode('ascii') for n in fnames] # "private" Python methods def _verify_has_samples(self): if self.mode == 1: msg = "Stan model {} is of mode 'test_grad';\n" \ "sampling is not conducted." 
raise AttributeError(msg.format(self.model_name)) elif self.mode == 2 or self.sim.get('samples') is None: msg = "Stan model {} does not contain samples." raise AttributeError(msg.format(self.model_name)) def _update_param_oi(self, pars): pars_bytes = [n.encode('ascii') for n in pars] cdef vector[string] pars_ = pars_bytes cdef int ret = self.thisptr.update_param_oi(pars_) return ret def _get_param_names(self): cdef vector[string] param_names_bytes = self.thisptr.param_names() param_names = [n.decode('utf-8') for n in param_names_bytes] return param_names def _get_param_fnames_oi(self): cdef vector[string] param_fnames_bytes = self.thisptr.param_fnames_oi() param_fnames = [n.decode('utf-8') for n in param_fnames_bytes] return param_fnames def _get_param_names_oi(self): cdef vector[string] param_names_bytes = self.thisptr.param_names_oi() param_names = [n.decode('utf-8') for n in param_names_bytes] return param_names def _get_param_dims(self): cdef vector[vector[uint]] dims = self.thisptr.param_dims() dims_ = dims return dims_ def _get_param_dims_oi(self): cdef vector[vector[uint]] dims = self.thisptr.param_dims_oi() dims_ = dims return dims_ def constrained_param_names(self): cdef vector[string] param_names_bytes = self.thisptr.constrained_param_names(False, False) param_names = [n.decode('utf-8') for n in param_names_bytes] return param_names def unconstrained_param_names(self): cdef vector[string] param_names_bytes = self.thisptr.unconstrained_param_names(False, False) param_names = [n.decode('utf-8') for n in param_names_bytes] return param_names def _call_sampler(self, dict args, pars_oi=None): return _call_sampler(self.data, args, pars_oi=pars_oi)
44,032
Python
.py
902
40.179601
157
0.62753
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,992
_chains.pyx
stan-dev_pystan2/pystan/_chains.pyx
#cython: language_level=3 #cython: boundscheck=False #cython: wraparound=False from libcpp.vector cimport vector from libc.math cimport sqrt cimport cython import numpy as np import pystan.constants # autocovariance is a template function, which Cython doesn't yet support cdef extern from "stan/math/prim/fun/autocovariance.hpp" namespace "stan::math": void stan_autocovariance "stan::math::autocovariance<double>"(const vector[double]& y, vector[double]& acov) cdef extern from "stan/math/prim/fun/sum.hpp" namespace "stan::math": double stan_sum "stan::math::sum"(vector[double]& x) cdef extern from "stan/math/prim/fun/mean.hpp" namespace "stan::math": double stan_mean "stan::math::mean"(vector[double]& x) cdef extern from "stan/math/prim/fun/variance.hpp" namespace "stan::math": double stan_variance "stan::math::variance"(vector[double]& x) cdef void get_kept_samples(dict sim, int k, int n, vector[double]& samples): """ Parameters ---------- k : unsigned int Chain index n : unsigned int Parameter index """ cdef int i cdef long[:] warmup2 = np.array(sim['warmup2']) slst = sim['samples'][k]['chains'] # chain k, an OrderedDict param_names = list(slst.keys()) # e.g., 'beta[1]', 'beta[2]', ... cdef double[:] nv = slst[param_names[n]] # parameter n samples.clear() for i in range(nv.shape[0] - warmup2[k]): samples.push_back(nv[warmup2[k] + i]) cdef double get_chain_mean(dict sim, int k, int n): cdef long[:] warmup2 = np.array(sim['warmup2']) slst = sim['samples'][k]['chains'] # chain k, an OrderedDict param_names = list(slst.keys()) # e.g., 'beta[1]', 'beta[2]', ... cdef vector[double] nv = slst[param_names[n]] # parameter n return stan_mean(nv[warmup2[k]:]) cdef vector[double] autocovariance(dict sim, int k, int n): """ Returns the autocovariance for the specified parameter in the kept samples of the chain specified. 
Parameters ---------- k : unsigned int Chain index n : unsigned int Parameter index Returns ------- acov : vector[double] Note ---- PyStan is profligate with memory here in comparison to RStan. A variety of copies are made where RStan passes around references. This is done mainly for convenience; the Cython code is simpler. """ cdef vector[double] samples, acov get_kept_samples(sim, k, n, samples) stan_autocovariance(samples, acov) return acov @cython.cdivision(True) def effective_sample_size(dict sim, int n): """ Return the effective sample size for the specified parameter across all kept samples. The implementation is close to the effective sample size description in BDA3 (p. 286-287). See more details in Stan reference manual section "Effective Sample Size". Current implementation takes the minimum number of samples across chains as the number of samples per chain. Parameters ---------- sim : dict Contains samples as well as related information (warmup, number of iterations, etc). n : int Parameter index Returns ------- ess : int """ cdef int i, chain cdef int m = sim['chains'] cdef vector[int] ns_save = sim['n_save'] cdef vector[int] ns_warmup2 = sim['warmup2'] cdef vector[int] ns_kept = [s - w for s, w in zip(sim['n_save'], sim['warmup2'])] cdef int n_samples = min(ns_kept) cdef vector[vector[double]] acov for chain in range(m): acov.push_back(autocovariance(sim, chain, n)) cdef vector[double] chain_mean cdef vector[double] chain_var # double rather than int to deal with Cython quirk, see issue #186 cdef double n_kept_samples for chain in range(m): n_kept_samples = ns_kept[chain] if n_kept_samples == 1: # fix crash for mingw on Windows return np.nan chain_mean.push_back(get_chain_mean(sim, chain, n)) chain_var.push_back(acov[chain][0] * n_kept_samples / (n_kept_samples-1)) cdef double mean_var = stan_mean(chain_var) cdef double var_plus = mean_var * (n_samples-1) / n_samples if m > 1: var_plus = var_plus + stan_variance(chain_mean) cdef vector[double] 
rho_hat_t for _ in range(n_samples): rho_hat_t.push_back(0) cdef vector[double] acov_t acov_t.clear() for chain in range(m): acov_t.push_back(acov[chain][1]) cdef double rho_hat_even = 1 rho_hat_t[0] = rho_hat_even cdef double rho_hat_odd = 1 - (mean_var - stan_mean(acov_t)) / var_plus rho_hat_t[1] = rho_hat_odd # Geyer's initial positive sequence cdef int max_t = 1 cdef int t = 1 while t < (n_samples - 2) and (rho_hat_even + rho_hat_odd) >= 0: acov_t.clear() for chain in range(m): acov_t.push_back(acov[chain][t + 1]) rho_hat_even = 1 - (mean_var - stan_mean(acov_t)) / var_plus acov_t.clear() for chain in range(m): acov_t.push_back(acov[chain][t + 2]) rho_hat_odd = 1 - (mean_var - stan_mean(acov_t)) / var_plus if (rho_hat_even + rho_hat_odd) >= 0: rho_hat_t[t + 1] = rho_hat_even rho_hat_t[t + 2] = rho_hat_odd max_t = t + 2 t += 2 # Geyer's initial monotone sequence t = 3 while t <= max_t - 2: if rho_hat_t[t + 1] + rho_hat_t[t + 2] > rho_hat_t[t - 1] + rho_hat_t[t]: rho_hat_t[t + 1] = (rho_hat_t[t - 1] + rho_hat_t[t]) / 2 rho_hat_t[t + 2] = rho_hat_t[t + 1] t += 2 cdef double ess = m * n_samples ess = ess / (-1 + 2 * stan_sum(rho_hat_t)) return ess @cython.cdivision(True) def split_potential_scale_reduction(dict sim, int n): """ Return the split potential scale reduction (split R hat) for the specified parameter. Current implementation takes the minimum number of samples across chains as the number of samples per chain. 
Parameters ---------- n : unsigned int Parameter index Returns ------- rhat : float Split R hat """ cdef int i, chain cdef double srhat cdef int n_chains = sim['chains'] cdef vector[int] ns_save = sim['n_save'] cdef vector[int] ns_warmup2 = sim['warmup2'] cdef vector[int] ns_kept = [s - w for s, w in zip(sim['n_save'], sim['warmup2'])] cdef int n_samples = min(ns_kept) if n_samples % 2 == 1: n_samples = n_samples - 1 cdef vector[double] split_chain_mean, split_chain_var cdef vector[double] samples, split_chain for chain in range(n_chains): samples.clear() get_kept_samples(sim, chain, n, samples) split_chain.clear() for i in range(n_samples / 2): split_chain.push_back(samples[i]) split_chain_mean.push_back(stan_mean(split_chain)) split_chain_var.push_back(stan_variance(split_chain)) split_chain.clear() for i in range(n_samples / 2, n_samples): split_chain.push_back(samples[i]) split_chain_mean.push_back(stan_mean(split_chain)) split_chain_var.push_back(stan_variance(split_chain)) cdef double var_between = n_samples / 2 * stan_variance(split_chain_mean) cdef double var_within = stan_mean(split_chain_var) srhat = sqrt((var_between / var_within + n_samples / 2 - 1) / (n_samples / 2)) return srhat def _test_autocovariance(dict sim, int k, int n): '''Test point for autocovariance function''' return autocovariance(sim, k, n) def _test_stan_functions(): y = np.arange(10) cdef vector[double] acov stan_autocovariance(y, acov) assert sum(acov) == -40.0, sum(acov) assert stan_sum(y) == sum(y) assert stan_mean(y) == np.mean(y) assert stan_variance(y) == np.var(y, ddof=1)
7,872
Python
.py
201
33.159204
112
0.637843
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,993
constants.py
stan-dev_pystan2/pystan/constants.py
MAX_UINT = 2**31 - 1 # conservative choice, maximum unsigned int on 32-bit Python 2.7 try: from enum import Enum # Python 3.4 except ImportError: from pystan.external.enum import Enum sampling_algo_t = Enum('sampling_algo_t', 'NUTS HMC Metropolis Fixed_param', module=__name__) variational_algo_t = Enum('variational_algo_t', 'MEANFIELD FULLRANK', module=__name__) class optim_algo_t(Enum): Newton = 1 BFGS = 3 LBFGS = 4 sampling_metric_t = Enum('sampling_metric_t', 'UNIT_E DIAG_E DENSE_E', module=__name__) stan_args_method_t = Enum('stan_args_method_t', 'SAMPLING OPTIM TEST_GRADIENT VARIATIONAL', module=__name__)
645
Python
.py
13
46.692308
108
0.712919
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,994
plots.py
stan-dev_pystan2/pystan/plots.py
import numpy as np import logging logger = logging.getLogger('pystan') def traceplot(fit, pars, dtypes, **kwargs): """ Use pymc's traceplot to display parameters. Additional arguments are passed to pymc.plots.traceplot. """ # FIXME: eventually put this in the StanFit object # FIXME: write a to_pymc(_trace) function # Deprecation warning added in PyStan 2.18 logger.warning("Deprecation warning."\ " PyStan plotting deprecated, use ArviZ library (Python 3.5+)."\ " `pip install arviz`; `arviz.plot_trace(fit)`)") try: from pystan.external.pymc import plots except ImportError: logger.critical("matplotlib required for plotting.") raise if pars is None: pars = list(fit.model_pars) + ["lp__"] values = fit.extract(dtypes=dtypes, pars=pars, permuted=False) values = {key : arr.reshape(-1, int(np.multiply.reduce(arr.shape[2:])), order="F") for key, arr in values.items()} return plots.traceplot(values, pars, **kwargs)
1,048
Python
.py
24
37.25
118
0.667647
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,995
api.py
stan-dev_pystan2/pystan/api.py
#----------------------------------------------------------------------------- # Copyright (c) 2013-2015, PyStan developers # # This file is licensed under Version 3.0 of the GNU General Public # License. See LICENSE for a text of the license. #----------------------------------------------------------------------------- import hashlib import io import logging import os import pystan._api # stanc wrapper from pystan._compat import string_types, PY2 from pystan.model import StanModel logger = logging.getLogger('pystan') def stanc(file=None, charset='utf-8', model_code=None, model_name="anon_model", include_paths=None, verbose=False, obfuscate_model_name=True, allow_undefined=False): """Translate Stan model specification into C++ code. Parameters ---------- file : {string, file}, optional If filename, the string passed as an argument is expected to be a filename containing the Stan model specification. If file, the object passed must have a 'read' method (file-like object) that is called to fetch the Stan model specification. charset : string, 'utf-8' by default If bytes or files are provided, this charset is used to decode. model_code : string, optional A string containing the Stan model specification. Alternatively, the model may be provided with the parameter `file`. model_name: string, 'anon_model' by default A string naming the model. If none is provided 'anon_model' is the default. However, if `file` is a filename, then the filename will be used to provide a name. include_paths: list of strings, optional Paths for #include files defined in Stan code. verbose : boolean, False by default Indicates whether intermediate output should be piped to the console. This output may be useful for debugging. obfuscate_model_name : boolean, True by default If False the model name in the generated C++ code will not be made unique by the insertion of randomly generated characters. Generally it is recommended that this parameter be left as True. 
allow_undefined : boolean, False by default If True, the C++ code can be written even if there are undefined functions. Returns ------- stanc_ret : dict A dictionary with the following keys: model_name, model_code, cpp_code, and status. Status indicates the success of the translation from Stan code into C++ code (success = 0, error = -1). Notes ----- C++ reserved words and Stan reserved words may not be used for variable names; see the Stan User's Guide for a complete list. The `#include` method follows a C/C++ syntax `#include foo/my_gp_funs.stan`. The method needs to be at the start of the row, no whitespace is allowed. After the included file no whitespace or comments are allowed. `pystan.experimental`(PyStan 2.18) has a `fix_include`-function to clean the `#include` statements from the `model_code`. Example: `from pystan.experimental import fix_include` `model_code = fix_include(model_code)` See also -------- StanModel : Class representing a compiled Stan model stan : Fit a model using Stan References ---------- The Stan Development Team (2013) *Stan Modeling Language User's Guide and Reference Manual*. <http://mc-stan.org/>. Examples -------- >>> stanmodelcode = ''' ... data { ... int<lower=0> N; ... real y[N]; ... } ... ... parameters { ... real mu; ... } ... ... model { ... mu ~ normal(0, 10); ... y ~ normal(mu, 1); ... } ... 
''' >>> r = stanc(model_code=stanmodelcode, model_name = "normal1") >>> sorted(r.keys()) ['cppcode', 'model_code', 'model_cppname', 'model_name', 'status'] >>> r['model_name'] 'normal1' """ if file and model_code: raise ValueError("Specify stan model with `file` or `model_code`, " "not both.") if file is None and model_code is None: raise ValueError("Model file missing and empty model_code.") if file is not None: if isinstance(file, string_types): try: with io.open(file, 'rt', encoding=charset) as f: model_code = f.read() except: logger.critical("Unable to read file specified by `file`.") raise else: model_code = file.read() # bytes, going into C++ code model_code_bytes = model_code.encode('utf-8') if include_paths is None: include_paths = [os.path.abspath('.')] elif isinstance(include_paths, string_types): include_paths = [include_paths] # add trailing / include_paths = [os.path.join(path, "") for path in include_paths] include_paths_bytes = [path.encode('utf-8') for path in include_paths] if obfuscate_model_name: # Make the model name depend on the code. model_name = ( model_name + '_' + hashlib.md5(model_code_bytes).hexdigest()) model_name_bytes = model_name.encode('ascii') if not isinstance(file, string_types): # use default 'unknown file name' filename_bytes = b'unknown file name' else: # use only the filename, used only for debug printing filename_bytes = os.path.split(file)[-1].encode('utf-8') result = pystan._api.stanc(model_code_bytes, model_name_bytes, allow_undefined, filename_bytes, include_paths_bytes, ) if result['status'] == -1: # EXCEPTION_RC is -1 msg = result['msg'] if PY2: # fix problem with unicode in error message in PY2 msg = msg.encode('ascii', 'replace') error_msg = "Failed to parse Stan model '{}'. 
Error message:\n{}".format(model_name, msg) raise ValueError(error_msg) elif result['status'] == 0: # SUCCESS_RC is 0 logger.debug("Successfully parsed Stan model '{}'.".format(model_name)) del result['msg'] result.update({'model_name': model_name}) result.update({'model_code': model_code}) result.update({'include_paths' : include_paths}) return result def stan(file=None, model_name="anon_model", model_code=None, fit=None, data=None, pars=None, chains=4, iter=2000, warmup=None, thin=1, init="random", seed=None, algorithm=None, control=None, sample_file=None, diagnostic_file=None, verbose=False, boost_lib=None, eigen_lib=None, include_paths=None, n_jobs=-1, allow_undefined=False, **kwargs): """Fit a model using Stan. The `pystan.stan` function was deprecated in version 2.17 and will be removed in version 3.0. Compiling and using a Stan Program (e.g., for drawing samples) should be done in separate steps. Parameters ---------- file : string {'filename', file-like object} Model code must found via one of the following parameters: `file` or `model_code`. If `file` is a filename, the string passed as an argument is expected to be a filename containing the Stan model specification. If `file` is a file object, the object passed must have a 'read' method (file-like object) that is called to fetch the Stan model specification. charset : string, optional If bytes or files are provided, this charset is used to decode. 'utf-8' by default. model_code : string A string containing the Stan model specification. Alternatively, the model may be provided with the parameter `file`. model_name: string, optional A string naming the model. If none is provided 'anon_model' is the default. However, if `file` is a filename, then the filename will be used to provide a name. 'anon_model' by default. fit : StanFit instance An instance of StanFit derived from a previous fit, None by default. 
If `fit` is not None, the compiled model associated with a previous fit is reused and recompilation is avoided. data : dict A Python dictionary providing the data for the model. Variables for Stan are stored in the dictionary as expected. Variable names are the keys and the values are their associated values. Stan only accepts certain kinds of values; see Notes. pars : list of string, optional A list of strings indicating parameters of interest. By default all parameters specified in the model will be stored. chains : int, optional Positive integer specifying number of chains. 4 by default. iter : int, 2000 by default Positive integer specifying how many iterations for each chain including warmup. warmup : int, iter//2 by default Positive integer specifying number of warmup (aka burin) iterations. As `warmup` also specifies the number of iterations used for stepsize adaption, warmup samples should not be used for inference. thin : int, optional Positive integer specifying the period for saving samples. Default is 1. init : {0, '0', 'random', function returning dict, list of dict}, optional Specifies how initial parameter values are chosen: - 0 or '0' initializes all to be zero on the unconstrained support. - 'random' generates random initial values. An optional parameter `init_r` controls the range of randomly generated initial values for parameters in terms of their unconstrained support; - list of size equal to the number of chains (`chains`), where the list contains a dict with initial parameter values; - function returning a dict with initial parameter values. The function may take an optional argument `chain_id`. seed : int or np.random.RandomState, optional The seed, a positive integer for random number generation. Only one seed is needed when multiple chains are used, as the other chain's seeds are generated from the first chain's to prevent dependency among random number streams. By default, seed is ``random.randint(0, MAX_UINT)``. 
algorithm : {"NUTS", "HMC", "Fixed_param"}, optional One of the algorithms that are implemented in Stan such as the No-U-Turn sampler (NUTS, Hoffman and Gelman 2011) and static HMC. sample_file : string, optional File name specifying where samples for *all* parameters and other saved quantities will be written. If not provided, no samples will be written. If the folder given is not writable, a temporary directory will be used. When there are multiple chains, an underscore and chain number are appended to the file name. By default do not write samples to file. diagnostic_file : string, optional File name specifying where diagnostic information should be written. By default no diagnostic information is recorded. boost_lib : string, optional The path to a version of the Boost C++ library to use instead of the one supplied with PyStan. eigen_lib : string, optional The path to a version of the Eigen C++ library to use instead of the one in the supplied with PyStan. include_paths : list of strings, optional Paths for #include files defined in Stan code. verbose : boolean, optional Indicates whether intermediate output should be piped to the console. This output may be useful for debugging. False by default. control : dict, optional A dictionary of parameters to control the sampler's behavior. Default values are used if control is not specified. The following are adaptation parameters for sampling algorithms. 
These are parameters used in Stan with similar names: - `adapt_engaged` : bool - `adapt_gamma` : float, positive, default 0.05 - `adapt_delta` : float, between 0 and 1, default 0.8 - `adapt_kappa` : float, between default 0.75 - `adapt_t0` : float, positive, default 10 - `adapt_init_buffer` : int, positive, defaults to 75 - `adapt_term_buffer` : int, positive, defaults to 50 - `adapt_window` : int, positive, defaults to 25 In addition, the algorithm HMC (called 'static HMC' in Stan) and NUTS share the following parameters: - `stepsize`: float, positive - `stepsize_jitter`: float, between 0 and 1 - `metric` : str, {"unit_e", "diag_e", "dense_e"} In addition, depending on which algorithm is used, different parameters can be set as in Stan for sampling. For the algorithm HMC we can set - `int_time`: float, positive For algorithm NUTS, we can set - `max_treedepth` : int, positive n_jobs : int, optional Sample in parallel. If -1 all CPUs are used. If 1, no parallel computing code is used at all, which is useful for debugging. allow_undefined : boolean, False by default If True, the C++ code can be written even if there are undefined functions. Returns ------- fit : StanFit instance Other parameters ---------------- chain_id : int, optional `chain_id` can be a vector to specify the chain_id for all chains or an integer. For the former case, they should be unique. For the latter, the sequence of integers starting from the given `chain_id` are used for all chains. init_r : float, optional `init_r` is only valid if `init` == "random". In this case, the intial values are simulated from [-`init_r`, `init_r`] rather than using the default interval (see the manual of (Cmd)Stan). test_grad: bool, optional If `test_grad` is ``True``, Stan will not do any sampling. Instead, the gradient calculation is tested and printed out and the fitted StanFit4Model object is in test gradient mode. By default, it is ``False``. 
append_samples`: bool, optional refresh`: int, optional Argument `refresh` can be used to control how to indicate the progress during sampling (i.e. show the progress every \code{refresh} iterations). By default, `refresh` is `max(iter/10, 1)`. obfuscate_model_name : boolean, optional `obfuscate_model_name` is only valid if `fit` is None. True by default. If False the model name in the generated C++ code will not be made unique by the insertion of randomly generated characters. Generally it is recommended that this parameter be left as True. Examples -------- >>> from pystan import stan >>> import numpy as np >>> model_code = ''' ... parameters { ... real y[2]; ... } ... model { ... y[1] ~ normal(0, 1); ... y[2] ~ double_exponential(0, 2); ... }''' >>> fit1 = stan(model_code=model_code, iter=10) >>> print(fit1) >>> excode = ''' ... transformed data { ... real y[20]; ... y[1] = 0.5796; y[2] = 0.2276; y[3] = -0.2959; ... y[4] = -0.3742; y[5] = 0.3885; y[6] = -2.1585; ... y[7] = 0.7111; y[8] = 1.4424; y[9] = 2.5430; ... y[10] = 0.3746; y[11] = 0.4773; y[12] = 0.1803; ... y[13] = 0.5215; y[14] = -1.6044; y[15] = -0.6703; ... y[16] = 0.9459; y[17] = -0.382; y[18] = 0.7619; ... y[19] = 0.1006; y[20] = -1.7461; ... } ... parameters { ... real mu; ... real<lower=0, upper=10> sigma; ... vector[2] z[3]; ... real<lower=0> alpha; ... } ... model { ... y ~ normal(mu, sigma); ... for (i in 1:3) ... z[i] ~ normal(0, 1); ... alpha ~ exponential(2); ... }''' >>> >>> def initfun1(): ... return dict(mu=1, sigma=4, z=np.random.normal(size=(3, 2)), alpha=1) >>> exfit0 = stan(model_code=excode, init=initfun1) >>> def initfun2(chain_id=1): ... return dict(mu=1, sigma=4, z=np.random.normal(size=(3, 2)), alpha=1 + chain_id) >>> exfit1 = stan(model_code=excode, init=initfun2) """ logger.warning('DeprecationWarning: pystan.stan was deprecated in version 2.17 and will be removed in version 3.0. 
' 'Compile and use a Stan program in separate steps.') # NOTE: this is a thin wrapper for other functions. Error handling occurs # elsewhere. if data is None: data = {} if warmup is None: warmup = int(iter // 2) obfuscate_model_name = kwargs.pop("obfuscate_model_name", True) if fit is not None: m = fit.stanmodel else: m = StanModel(file=file, model_name=model_name, model_code=model_code, boost_lib=boost_lib, eigen_lib=eigen_lib, include_paths=include_paths, obfuscate_model_name=obfuscate_model_name, verbose=verbose, allow_undefined=allow_undefined) # check that arguments in kwargs are valid valid_args = {"chain_id", "init_r", "test_grad", "append_samples", "enable_random_init", "refresh", "control"} for arg in kwargs: if arg not in valid_args: raise ValueError("Parameter `{}` is not recognized.".format(arg)) fit = m.sampling(data, pars=pars, chains=chains, iter=iter, warmup=warmup, thin=thin, seed=seed, init=init, sample_file=sample_file, diagnostic_file=diagnostic_file, verbose=verbose, algorithm=algorithm, control=control, n_jobs=n_jobs, **kwargs) return fit
17,919
Python
.py
373
40.262735
120
0.634308
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,996
__init__.py
stan-dev_pystan2/pystan/__init__.py
#----------------------------------------------------------------------------- # Copyright (c) 2013-2015 PyStan developers # # This file is licensed under Version 3.0 of the GNU General Public # License. See LICENSE for a text of the license. #----------------------------------------------------------------------------- import logging from pystan.api import stanc, stan from pystan.misc import read_rdump, stan_rdump, stansummary from pystan.diagnostics import check_hmc_diagnostics from pystan.model import StanModel from pystan.lookup import lookup logger = logging.getLogger('pystan') logger.addHandler(logging.NullHandler()) if len(logger.handlers) == 1: logging.basicConfig(level=logging.INFO) # following PEP 386 # See also https://docs.openstack.org/pbr/latest/user/semver.html __version__ = '2.22.1.0dev'
822
Python
.py
19
41.894737
78
0.64375
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,997
diagnostics.py
stan-dev_pystan2/pystan/diagnostics.py
import pystan from pystan.misc import _check_pars, _remove_empty_pars from pystan._compat import string_types import numpy as np import logging logger = logging.getLogger('pystan') # Diagnostics modified from Betancourt's stan_utility.py module def check_div(fit, verbose=True, per_chain=False): """Check for transitions that ended with a divergence Parameters ---------- fit : StanFit4Model object verbose : bool or int, optional If ``verbose`` is ``False`` or a nonpositive integer, no diagnostic messages are printed, and only the return value of the function conveys diagnostic information. If it is ``True`` (the default) or an integer greater than zero, then a diagnostic message is printed only if there are divergent transitions. If it is an integer greater than 2, then extra diagnostic messages are printed. per_chain : bool, optional Print the number of divergent transitions in each chain Returns ------- bool ``True`` if there are no problems with divergent transitions and ``False`` otherwise. Raises ------ ValueError If ``fit`` has no information about divergent transitions. 
""" verbosity = int(verbose) sampler_params = fit.get_sampler_params(inc_warmup=False) try: divergent = np.column_stack([y['divergent__'].astype(bool) for y in sampler_params]) except: raise ValueError('Cannot access divergence information from fit object') n_for_chains = divergent.sum(axis=0) n = n_for_chains.sum() if n > 0: if verbosity > 0: N = divergent.size logger.warning('{} of {} iterations ended '.format(n, N) + 'with a divergence ({:.3g} %).'.format(100 * n / N)) if per_chain: chain_len, num_chains = divergent.shape for chain_num in range(num_chains): if n_for_chains[chain_num] > 0: logger.warning('Chain {}: {} of {} iterations ended '.format(chain_num + 1, n_for_chains[chain_num], chain_len) + 'with a divergence ({:.3g} %).'.format(100 * n_for_chains[chain_num] / chain_len)) try: adapt_delta = fit.stan_args[0]['ctrl']['sampling']['adapt_delta'] except: logger.warning('Cannot obtain value of adapt_delta from fit object') adapt_delta = None if adapt_delta != None: logger.warning('Try running with adapt_delta larger than {}'.format(adapt_delta) + ' to remove the divergences.') else: logger.warning('Try running with larger adapt_delta to remove the divergences.') return False else: if verbosity > 2: logger.info('No divergent transitions found.') return True def check_treedepth(fit, verbose=True, per_chain=False): """Check for transitions that ended prematurely due to maximum tree depth limit Parameters ---------- fit : StanFit4Model object verbose : bool or int, optional If ``verbose`` is ``False`` or a nonpositive integer, no diagnostic messages are printed, and only the return value of the function conveys diagnostic information. If it is ``True`` (the default) or an integer greater than zero, then a diagnostic message is printed only if there are transitions that ended ended prematurely due to maximum tree depth limit. If it is an integer greater than 2, then extra diagnostic messages are printed. 
per_chain : bool, optional Print the number of prematurely ending transitions in each chain Returns ------- bool ``True`` if there are no problems with tree depth and ``False`` otherwise. Raises ------ ValueError If ``fit`` has no information about tree depth. This could happen if ``fit`` was generated from a sampler other than NUTS. """ verbosity = int(verbose) sampler_params = fit.get_sampler_params(inc_warmup=False) try: depths = np.column_stack([y['treedepth__'].astype(int) for y in sampler_params]) except: raise ValueError('Cannot access tree depth information from fit object') try: max_treedepth = int(fit.stan_args[0]['ctrl']['sampling']['max_treedepth']) except: raise ValueError('Cannot obtain value of max_treedepth from fit object') n_for_chains = (depths >= max_treedepth).sum(axis=0) n = n_for_chains.sum() if n > 0: if verbosity > 0: N = depths.size logger.warning(('{} of {} iterations saturated the maximum tree depth of {}' + ' ({:.3g} %)').format(n, N, max_treedepth, 100 * n / N)) if per_chain: chain_len, num_chains = depths.shape for chain_num in range(num_chains): if n_for_chains[chain_num] > 0: logger.warning('Chain {}: {} of {} saturated '.format(chain_num + 1, n_for_chains[chain_num], chain_len) + 'the maximum tree depth of {} ({:.3g} %).'.format(max_treedepth, 100 * n_for_chains[chain_num] / chain_len)) logger.warning('Run again with max_treedepth larger than {}'.format(max_treedepth) + ' to avoid saturation') return False else: if verbosity > 2: logger.info('No transitions that ended prematurely due to maximum tree depth limit') return True def check_energy(fit, verbose=True): """Checks the energy Bayesian fraction of missing information (E-BFMI) Parameters ---------- fit : StanFit4Model object verbose : bool or int, optional If ``verbose`` is ``False`` or a nonpositive integer, no diagnostic messages are printed, and only the return value of the function conveys diagnostic information. 
If it is ``True`` (the default) or an integer greater than zero, then a diagnostic message is printed only if there is low E-BFMI in one or more chains. If it is an integer greater than 2, then extra diagnostic messages are printed. Returns ------- bool ``True`` if there are no problems with E-BFMI and ``False`` otherwise. Raises ------ ValueError If ``fit`` has no information about E-BFMI. """ verbosity = int(verbose) sampler_params = fit.get_sampler_params(inc_warmup=False) try: energies = np.column_stack([y['energy__'] for y in sampler_params]) except: raise ValueError('Cannot access energy information from fit object') chain_len, num_chains = energies.shape numer = ((np.diff(energies, axis=0)**2).sum(axis=0)) / chain_len denom = np.var(energies, axis=0) e_bfmi = numer / denom no_warning = True for chain_num in range(num_chains): if e_bfmi[chain_num] < 0.2: if verbosity > 0: logger.warning('Chain {}: E-BFMI = {:.3g}'.format(chain_num + 1, e_bfmi[chain_num])) no_warning = False else: if verbosity > 2: logger.info('Chain {}: E-BFMI (= {:.3g}) '.format(chain_num + 1, e_bfmi[chain_num]) + 'equals or exceeds threshold of 0.2.') if no_warning: if verbosity > 2: logger.info('E-BFMI indicated no pathological behavior') return True else: if verbosity > 0: logger.warning('E-BFMI below 0.2 indicates you may need to reparameterize your model') return False def check_n_eff(fit, pars=None, verbose=True): """Checks the effective sample size per iteration Parameters ---------- fit : StanFit4Model object pars : {str, sequence of str}, optional Parameter (or quantile) name(s). Test only specific parameters. Raises an exception if parameter is not valid. verbose : bool or int, optional If ``verbose`` is ``False`` or a nonpositive integer, no diagnostic messages are printed, and only the return value of the function conveys diagnostic information. 
If it is ``True`` (the default) or an integer greater than zero, then a diagnostic message is printed only if there are effective sample sizes that appear pathologically low. If it is an integer greater than 1, then parameter (quantile) diagnostics are printed. If integer is greater than 2 extra diagnostic messages are printed. Returns ------- bool ``True`` if there are no problems with effective sample size and ``False`` otherwise. """ verbosity = int(verbose) n_iter = sum(fit.sim['n_save'])-sum(fit.sim['warmup2']) if pars is None: pars = fit.sim['fnames_oi'] else: if isinstance(pars, string_types): pars = [pars] pars = _remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi']) allpars = fit.sim['pars_oi'] + fit.sim['fnames_oi'] _check_pars(allpars, pars) packed_pars = set(pars) - set(fit.sim['fnames_oi']) if packed_pars: unpack_dict = {} for par_unpacked in fit.sim['fnames_oi']: par_packed = par_unpacked.split("[")[0] if par_packed not in unpack_dict: unpack_dict[par_packed] = [] unpack_dict[par_packed].append(par_unpacked) pars_unpacked = [] for par in pars: if par in packed_pars: pars_unpacked.extend(unpack_dict[par]) else: pars_unpacked.append(par) pars = pars_unpacked par_n_dict = {} for n, par in enumerate(fit.sim['fnames_oi']): par_n_dict[par] = n no_warning = True for name in pars: n = par_n_dict[name] n_eff = pystan.chains.ess(fit.sim, n) ratio = n_eff / n_iter if ((ratio < 0.001) or np.isnan(ratio) or np.isinf(ratio)): if verbosity > 1: logger.warning('n_eff / iter for parameter {} is {:.3g}!'.format(name, ratio)) no_warning = False if verbosity <= 1: break if no_warning: if verbosity > 2: logger.info('n_eff / iter looks reasonable for all parameters') return True else: if verbosity > 0: logger.warning('n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated') return False def check_rhat(fit, pars=None, verbose=True): """Checks the potential scale reduction factors, i.e., Rhat values Parameters ---------- fit 
: StanFit4Model object pars : {str, sequence of str}, optional Parameter (or quantile) name(s). Test only specific parameters. Raises an exception if parameter is not valid. verbose : bool or int, optional If ``verbose`` is ``False`` or a nonpositive integer, no diagnostic messages are printed, and only the return value of the function conveys diagnostic information. If it is ``True`` (the default) or an integer greater than zero, then a diagnostic message is printed only if there are Rhat values too far from 1. If ``verbose`` is an integer greater than 1, parameter (quantile) diagnostics are printed. If ``verbose`` is an integer greater than 2, then extra diagnostic messages are printed. Returns ------- bool ``True`` if there are no problems with with Rhat and ``False`` otherwise. """ verbosity = int(verbose) if pars is None: pars = fit.sim['fnames_oi'] else: if isinstance(pars, string_types): pars = [pars] pars = _remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi']) allpars = fit.sim['pars_oi'] + fit.sim['fnames_oi'] _check_pars(allpars, pars) packed_pars = set(pars) - set(fit.sim['fnames_oi']) if packed_pars: unpack_dict = {} for par_unpacked in fit.sim['fnames_oi']: par_packed = par_unpacked.split("[")[0] if par_packed not in unpack_dict: unpack_dict[par_packed] = [] unpack_dict[par_packed].append(par_unpacked) pars_unpacked = [] for par in pars: if par in packed_pars: pars_unpacked.extend(unpack_dict[par]) else: pars_unpacked.append(par) pars = pars_unpacked par_n_dict = {} for n, par in enumerate(fit.sim['fnames_oi']): par_n_dict[par] = n no_warning = True for name in pars: n = par_n_dict[name] rhat = pystan.chains.splitrhat(fit.sim, n) if (np.isnan(rhat) or np.isinf(rhat) or (rhat > 1.1) or (rhat < 0.9)): if verbosity > 1: logger.warning('Rhat for parameter {} is {:.3g}!'.format(name, rhat)) no_warning = False if verbosity <= 1: break if no_warning: if verbosity > 2: logger.info('Rhat looks reasonable for all parameters') return True else: if 
verbosity > 0: logger.warning('Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed') return False def check_hmc_diagnostics(fit, pars=None, verbose=True, per_chain=False, checks=None): """Checks all hmc diagnostics Parameters ---------- fit : StanFit4Model object verbose : bool or int, optional If ``verbose`` is ``False`` or a nonpositive integer, no diagnostic messages are printed, and only the return value of the function conveys diagnostic information. If it is ``True`` (the default) or an integer greater than zero, then diagnostic messages are printed only for diagnostic checks that fail. If ``verbose`` is an integer greater than 1, then parameter (quantile) diagnostics are printed. If ``verbose`` is greater than 2, then extra diagnostic messages are printed. per_chain : bool, optional Where applicable, print diagnostics on a per-chain basis. This applies mainly to the divergence and treedepth checks. checks : list, {"n_eff", "Rhat", "divergence", "treedepth", "energy"}, optional By default run all checks. If ``checks`` is defined, run only checks given in ``checks`` Returns ------- out_dict : dict A dictionary where each key is the name of a diagnostic check, and the value associated with each key is a Boolean value that is True if the check passed and False otherwise. Possible valid keys are 'n_eff', 'Rhat', 'divergence', 'treedepth', and 'energy', though which keys are available will depend upon the sampling algorithm used. 
""" # For consistency with the individual diagnostic functions verbosity = int(verbose) all_checks = {"n_eff", "Rhat", "divergence", "treedepth", "energy"} if checks is None: checks = all_checks else: undefined_checks = [] for c in checks: # accept lowercase Rhat if c == "rhat": continue if c not in all_checks: undefined_checks.append(c) if undefined_checks: ucstr = "[" + ", ".join(undefined_checks) + "]" msg = "checks: {} are not legal checks: {}".format(ucstr, all_checks) raise TypeError(msg) out_dict = {} if "n_eff" in checks: try: out_dict['n_eff'] = check_n_eff(fit, pars, verbose) except ValueError: if verbosity > 0: logger.warning('Skipping check of effective sample size (n_eff)') if ("Rhat" in checks) or ("rhat" in checks): try: out_dict['Rhat'] = check_rhat(fit, pars, verbose) except ValueError: if verbosity > 0: logger.warning('Skipping check of potential scale reduction factors (Rhat)') if "divergence" in checks: try: out_dict['divergence'] = check_div(fit, verbose, per_chain) except ValueError: if verbosity > 0: logger.warning('Skipping check of divergent transitions (divergence)') if "treedepth" in checks: try: out_dict['treedepth'] = check_treedepth(fit, verbose, per_chain) except ValueError: if verbosity > 0: logger.warning('Skipping check of transitions ending prematurely due to maximum tree depth limit (treedepth)') if "energy" in checks: try: out_dict['energy'] = check_energy(fit, verbose) except ValueError: if verbosity > 0: logger.warning('Skipping check of E-BFMI (energy)') return out_dict
17,940
Python
.py
403
33.364764
126
0.581097
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,998
model.py
stan-dev_pystan2/pystan/model.py
#----------------------------------------------------------------------------- # Copyright (c) 2013-2015, PyStan developers # # This file is licensed under Version 3.0 of the GNU General Public # License. See LICENSE for a text of the license. #----------------------------------------------------------------------------- from pystan._compat import PY2, string_types, implements_to_string, izip from collections import OrderedDict if PY2: from collections import Callable, Iterable else: from collections.abc import Callable, Iterable import datetime import io import itertools import logging import numbers import os import platform import shutil import string import sys import tempfile import time import distutils from distutils.core import Extension import Cython from Cython.Build.Inline import _get_build_extension from Cython.Build.Dependencies import cythonize import numpy as np import pystan.api import pystan.misc import pystan.diagnostics logger = logging.getLogger('pystan') def load_module(module_name, module_path): """Load the module named `module_name` from `module_path` independently of the Python version.""" if platform.system() == "Windows": pystan.misc.add_libtbb_path() if sys.version_info >= (3,0): import pyximport pyximport.install() sys.path.append(module_path) return __import__(module_name) else: import imp module_info = imp.find_module(module_name, [module_path]) return imp.load_module(module_name, *module_info) def _map_parallel(function, args, n_jobs): """multiprocessing.Pool(processors=n_jobs).map with some error checking""" # Following the error checking found in joblib multiprocessing = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None if multiprocessing: try: import multiprocessing import multiprocessing.pool except ImportError: multiprocessing = None if sys.platform.startswith("win") and PY2: msg = 'Multiprocessing is not supported on Windows with Python 2.X. 
Setting n_jobs=1' logger.warning(msg) n_jobs = 1 # 2nd stage: validate that locking is available on the system and # issue a warning if not if multiprocessing: try: _sem = multiprocessing.Semaphore() del _sem # cleanup except (ImportError, OSError) as e: multiprocessing = None logger.warning('{}. _map_parallel will operate in serial mode'.format(e)) if multiprocessing and int(n_jobs) not in (0, 1): if n_jobs == -1: n_jobs = None try: pool = multiprocessing.Pool(processes=n_jobs) map_result = pool.map(function, args) finally: pool.close() pool.join() else: map_result = list(map(function, args)) return map_result # NOTE: StanModel instance stores references to a compiled, uninstantiated # C++ model. @implements_to_string class StanModel: """ Model described in Stan's modeling language compiled from C++ code. Instances of StanModel are typically created indirectly by the functions `stan` and `stanc`. Parameters ---------- file : string {'filename', 'file'} If filename, the string passed as an argument is expected to be a filename containing the Stan model specification. If file, the object passed must have a 'read' method (file-like object) that is called to fetch the Stan model specification. charset : string, 'utf-8' by default If bytes or files are provided, this charset is used to decode. model_name: string, 'anon_model' by default A string naming the model. If none is provided 'anon_model' is the default. However, if `file` is a filename, then the filename will be used to provide a name. model_code : string A string containing the Stan model specification. Alternatively, the model may be provided with the parameter `file`. stanc_ret : dict A dict returned from a previous call to `stanc` which can be used to specify the model instead of using the parameter `file` or `model_code`. include_paths : list of strings Paths for #include files defined in Stan program code. 
boost_lib : string The path to a version of the Boost C++ library to use instead of the one supplied with PyStan. eigen_lib : string The path to a version of the Eigen C++ library to use instead of the one in the supplied with PyStan. verbose : boolean, False by default Indicates whether intermediate output should be piped to the console. This output may be useful for debugging. allow_undefined : boolean, False by default If True, the C++ code can be written even if there are undefined functions. includes : list, None by default If not None, the elements of this list will be assumed to be the names of custom C++ header files that should be included. include_dirs : list, None by default If not None, the directories in this list are added to the search path of the compiler. kwargs : keyword arguments Additional arguments passed to `stanc`. Attributes ---------- model_name : string model_code : string Stan code for the model. model_cpp : string C++ code for the model. module : builtins.module Python module created by compiling the C++ code for the model. Methods ------- show Print the Stan model specification. sampling Draw samples from the model. optimizing Obtain a point estimate by maximizing the log-posterior. get_cppcode Return the C++ code for the module. get_cxxflags Return the 'CXXFLAGS' used for compiling the model. get_include_paths Return include_paths used for compiled model. See also -------- stanc: Compile a Stan model specification stan: Fit a model using Stan Notes ----- More details of Stan, including the full user's guide and reference manual can be found at <URL: http://mc-stan.org/>. There are three ways to specify the model's code for `stan_model`. 1. parameter `model_code`, containing a string to whose value is the Stan model specification, 2. parameter `file`, indicating a file (or a connection) from which to read the Stan model specification, or 3. parameter `stanc_ret`, indicating the re-use of a model generated in a previous call to `stanc`. 
References ---------- The Stan Development Team (2013) *Stan Modeling Language User's Guide and Reference Manual*. <URL: http://mc-stan.org/>. Examples -------- >>> model_code = 'parameters {real y;} model {y ~ normal(0,1);}' >>> model_code; m = StanModel(model_code=model_code) ... # doctest: +ELLIPSIS 'parameters ... >>> m.model_name 'anon_model' """ def __init__(self, file=None, charset='utf-8', model_name="anon_model", model_code=None, stanc_ret=None, include_paths=None, boost_lib=None, eigen_lib=None, verbose=False, obfuscate_model_name=True, extra_compile_args=None, allow_undefined=False, include_dirs=None, includes=None): tbb_dir = os.path.abspath(os.path.join( os.path.dirname(__file__), 'stan', 'lib', 'stan_math', 'lib','tbb' )) if stanc_ret is None: stanc_ret = pystan.api.stanc(file=file, charset=charset, model_code=model_code, model_name=model_name, verbose=verbose, include_paths=include_paths, obfuscate_model_name=obfuscate_model_name, allow_undefined=allow_undefined) if not isinstance(stanc_ret, dict): raise ValueError("stanc_ret must be an object returned by stanc.") stanc_ret_keys = {'status', 'model_code', 'model_cppname', 'cppcode', 'model_name', 'include_paths'} if not all(n in stanc_ret_keys for n in stanc_ret): raise ValueError("stanc_ret lacks one or more of the keys: " "{}".format(str(stanc_ret_keys))) elif stanc_ret['status'] != 0: # success == 0 raise ValueError("stanc_ret is not a successfully returned " "dictionary from stanc.") self.model_cppname = stanc_ret['model_cppname'] self.model_name = stanc_ret['model_name'] self.model_code = stanc_ret['model_code'] self.model_cppcode = stanc_ret['cppcode'] self.model_include_paths = stanc_ret['include_paths'] if allow_undefined or include_dirs or includes: logger.warning("External C++ interface is an experimental feature. Be careful.") msg = "COMPILING THE C++ CODE FOR MODEL {} NOW." 
logger.info(msg.format(self.model_name)) if verbose: msg = "OS: {}, Python: {}, Cython {}".format(sys.platform, sys.version, Cython.__version__) logger.info(msg) if boost_lib is not None: # FIXME: allow boost_lib, eigen_lib to be specified raise NotImplementedError if eigen_lib is not None: raise NotImplementedError # module_name needs to be unique so that each model instance has its own module nonce = abs(hash((self.model_name, time.time()))) self.module_name = 'stanfit4{}_{}'.format(self.model_name, nonce) lib_dir = tempfile.mkdtemp(prefix='pystan_') pystan_dir = os.path.dirname(__file__) if include_dirs is None: include_dirs = [] elif not isinstance(include_dirs, list): raise TypeError("'include_dirs' needs to be a list: type={}".format(type(include_dirs))) include_dirs += [ lib_dir, pystan_dir, os.path.join(pystan_dir, "stan", "src"), os.path.join(pystan_dir, "stan", "lib", "stan_math"), os.path.join(pystan_dir, "stan", "lib", "stan_math", "lib", "eigen_3.3.3"), os.path.join(pystan_dir, "stan", "lib", "stan_math", "lib", "boost_1.72.0"), os.path.join(pystan_dir, "stan", "lib", "stan_math", "lib", "sundials_4.1.0", "include"), os.path.join(pystan_dir, "stan", "lib", "stan_math", "lib", "tbb", "include"), np.get_include(), ] model_cpp_file = os.path.join(lib_dir, self.model_cppname + '.hpp') if includes is not None: code = "" for fn in includes: code += '#include "{0}"\n'.format(fn) ind = self.model_cppcode.index("static int current_statement_begin__;") self.model_cppcode = "\n".join([ self.model_cppcode[:ind], code, self.model_cppcode[ind:] ]) with io.open(model_cpp_file, 'w', encoding='utf-8') as outfile: outfile.write(self.model_cppcode) pyx_file = os.path.join(lib_dir, self.module_name + '.pyx') pyx_template_file = os.path.join(pystan_dir, 'stanfit4model.pyx') with io.open(pyx_template_file, 'r', encoding='utf-8') as infile: s = infile.read() template = string.Template(s) with io.open(pyx_file, 'w', encoding='utf-8') as outfile: s = 
template.safe_substitute(model_cppname=self.model_cppname) outfile.write(s) stan_macros = [ ('BOOST_RESULT_OF_USE_TR1', None), ('BOOST_NO_DECLTYPE', None), ('BOOST_DISABLE_ASSERTS', None), ] build_extension = _get_build_extension() # compile stan models with optimization (-O2) # (stanc is compiled without optimization (-O0) currently, see #33) if extra_compile_args is None: extra_compile_args = [] if platform.platform().startswith('Win'): if build_extension.compiler in (None, 'msvc'): logger.warning("MSVC compiler is not supported") extra_compile_args = [ '/EHsc', '-DBOOST_DATE_TIME_NO_LIB', '/std:c++14', ] + extra_compile_args else: # Windows, but not msvc, likely mingw # fix bug in MingW-W64 # use posix threads extra_compile_args = [ '-O2', '-ftemplate-depth-256', '-Wno-unused-function', '-Wno-uninitialized', '-std=c++1y', '-D_hypot=hypot', '-pthread', '-fexceptions', '-DSTAN_THREADS', '-D_REENTRANT', ] + extra_compile_args extra_link_args = [] else: # linux or macOS extra_compile_args = [ '-O2', '-ftemplate-depth-256', '-Wno-unused-function', '-Wno-uninitialized', '-std=c++1y', '-DSTAN_THREADS', '-D_REENTRANT', # stan-math requires _REENTRANT being defined during compilation to make lgamma_r available. 
] + extra_compile_args extra_link_args = ['-Wl,-rpath,{}'.format(os.path.abspath(tbb_dir))] distutils.log.set_verbosity(verbose) extension = Extension(name=self.module_name, language="c++", sources=[pyx_file], define_macros=stan_macros, include_dirs=include_dirs, libraries=["tbb"], library_dirs=[tbb_dir], extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, ) cython_include_dirs = ['.', pystan_dir] build_extension.extensions = cythonize([extension], include_path=cython_include_dirs, quiet=not verbose) build_extension.build_temp = os.path.dirname(pyx_file) build_extension.build_lib = lib_dir redirect_stderr = not verbose and pystan.misc._has_fileno(sys.stderr) if redirect_stderr: # silence stderr for compilation orig_stderr = pystan.misc._redirect_stderr() try: build_extension.run() finally: if redirect_stderr: # restore stderr os.dup2(orig_stderr, sys.stderr.fileno()) self.module = load_module(self.module_name, lib_dir) self.module_filename = os.path.basename(self.module.__file__) # once the module is in memory, we no longer need the file on disk # but we do need a copy of the file for pickling and the module name with io.open(os.path.join(lib_dir, self.module_filename), 'rb') as f: self.module_bytes = f.read() shutil.rmtree(lib_dir, ignore_errors=True) self.fit_class = getattr(self.module, "StanFit4Model") def __str__(self): # NOTE: returns unicode even for Python 2.7, implements_to_string # decorator creates __unicode__ and __str__ s = u"StanModel object '{}' coded as follows:\n{}" return s.format(self.model_name, self.model_code) def show(self): print(self) @property def dso(self): # warning added in PyStan 2.8.0 logger.warning('DeprecationWarning: Accessing the module with `dso` is deprecated and will be removed in a future version. '\ 'Use `module` instead.') return self.module def get_cppcode(self): return self.model_cppcode def get_cxxflags(self): # FIXME: implement this? 
raise NotImplementedError def get_include_paths(self): return self.model_include_paths def __getstate__(self): """Specify how instances are to be pickled self.module is unpicklable, for example. """ state = self.__dict__.copy() del state['module'] del state['fit_class'] return state def __setstate__(self, state): self.__dict__.update(state) lib_dir = tempfile.mkdtemp() with io.open(os.path.join(lib_dir, self.module_filename), 'wb') as f: f.write(self.module_bytes) try: self.module = load_module(self.module_name, lib_dir) self.fit_class = getattr(self.module, "StanFit4Model") except Exception as e: logger.warning(e) logger.warning("Something went wrong while unpickling " "the StanModel. Consider recompiling.") # once the module is in memory, we no longer need the file on disk shutil.rmtree(lib_dir, ignore_errors=True) def optimizing(self, data=None, seed=None, init='random', sample_file=None, algorithm=None, verbose=False, as_vector=True, **kwargs): """Obtain a point estimate by maximizing the joint posterior. Parameters ---------- data : dict A Python dictionary providing the data for the model. Variables for Stan are stored in the dictionary as expected. Variable names are the keys and the values are their associated values. Stan only accepts certain kinds of values; see Notes. seed : int or np.random.RandomState, optional The seed, a positive integer for random number generation. Only one seed is needed when multiple chains are used, as the other chain's seeds are generated from the first chain's to prevent dependency among random number streams. By default, seed is ``random.randint(0, MAX_UINT)``. init : {0, '0', 'random', function returning dict, list of dict}, optional Specifies how initial parameter values are chosen: - 0 or '0' initializes all to be zero on the unconstrained support. - 'random' generates random initial values. 
An optional parameter `init_r` controls the range of randomly generated initial values for parameters in terms of their unconstrained support; - list of size equal to the number of chains (`chains`), where the list contains a dict with initial parameter values; - function returning a dict with initial parameter values. The function may take an optional argument `chain_id`. sample_file : string, optional File name specifying where samples for *all* parameters and other saved quantities will be written. If not provided, no samples will be written. If the folder given is not writable, a temporary directory will be used. When there are multiple chains, an underscore and chain number are appended to the file name. By default do not write samples to file. algorithm : {"LBFGS", "BFGS", "Newton"}, optional Name of optimization algorithm to be used. Default is LBFGS. verbose : boolean, optional Indicates whether intermediate output should be piped to the console. This output may be useful for debugging. False by default. as_vector : boolean, optional Indicates an OrderedDict will be returned rather than a nested dictionary with keys 'par' and 'value'. Returns ------- optim : OrderedDict Depending on `as_vector`, returns either an OrderedDict having parameters as keys and point estimates as values or an OrderedDict with components 'par' and 'value'. ``optim['par']`` is a dictionary of point estimates, indexed by the parameter name. ``optim['value']`` stores the value of the log-posterior (up to an additive constant, the ``lp__`` in Stan) corresponding to the point identified by `optim`['par']. Other parameters ---------------- iter : int, optional The maximum number of iterations. save_iterations : bool, optional refresh : int, optional init_alpha : float, optional For BFGS and LBFGS, default is 0.001. tol_obj : float, optional For BFGS and LBFGS, default is 1e-12. tol_rel_obj : int, optional For BFGS and LBFGS, default is 1e4. 
tol_grad : float, optional For BFGS and LBFGS, default is 1e-8. tol_rel_grad : float, optional For BFGS and LBFGS, default is 1e7. tol_param : float, optional For BFGS and LBFGS, default is 1e-8. history_size : int, optional For LBFGS, default is 5. Refer to the manuals for both CmdStan and Stan for more details. Examples -------- >>> from pystan import StanModel >>> m = StanModel(model_code='parameters {real y;} model {y ~ normal(0,1);}') >>> f = m.optimizing() """ algorithms = {"BFGS", "LBFGS", "Newton"} if algorithm is None: algorithm = "LBFGS" if algorithm not in algorithms: raise ValueError("Algorithm must be one of {}".format(algorithms)) if data is None: data = {} seed = pystan.misc._check_seed(seed) fit = self.fit_class(data, seed) m_pars = fit._get_param_names() p_dims = fit._get_param_dims() if 'lp__' in m_pars: idx_of_lp = m_pars.index('lp__') del m_pars[idx_of_lp] del p_dims[idx_of_lp] if isinstance(init, numbers.Number): init = str(init) elif isinstance(init, Callable): init = init() elif not isinstance(init, Iterable) and \ not isinstance(init, string_types): raise ValueError("Wrong specification of initial values.") stan_args = dict(init=init, seed=seed, method="optim", algorithm=algorithm) if sample_file is not None: stan_args['sample_file'] = pystan.misc._writable_sample_file(sample_file) # check that arguments in kwargs are valid valid_args = {"iter", "save_iterations", "refresh", "init_alpha", "tol_obj", "tol_grad", "tol_param", "tol_rel_obj", "tol_rel_grad", "history_size"} for arg in kwargs: if arg not in valid_args: raise ValueError("Parameter `{}` is not recognized.".format(arg)) # This check is is to warn users of older versions of PyStan if kwargs.get('method'): raise ValueError('`method` is no longer used. 
Specify `algorithm` instead.') stan_args.update(kwargs) stan_args = pystan.misc._get_valid_stan_args(stan_args) ret, sample = fit._call_sampler(stan_args) pars = pystan.misc._par_vector2dict(sample['par'], m_pars, p_dims) if not as_vector: return OrderedDict([('par', pars), ('value', sample['value'])]) else: return pars def sampling(self, data=None, pars=None, chains=4, iter=2000, warmup=None, thin=1, seed=None, init='random', sample_file=None, diagnostic_file=None, verbose=False, algorithm=None, control=None, n_jobs=-1, **kwargs): """Draw samples from the model. Parameters ---------- data : dict A Python dictionary providing the data for the model. Variables for Stan are stored in the dictionary as expected. Variable names are the keys and the values are their associated values. Stan only accepts certain kinds of values; see Notes. pars : list of string, optional A list of strings indicating parameters of interest. By default all parameters specified in the model will be stored. chains : int, optional Positive integer specifying number of chains. 4 by default. iter : int, 2000 by default Positive integer specifying how many iterations for each chain including warmup. warmup : int, iter//2 by default Positive integer specifying number of warmup (aka burn-in) iterations. As `warmup` also specifies the number of iterations used for step-size adaption, warmup samples should not be used for inference. `warmup=0` forced if `algorithm=\"Fixed_param\"`. thin : int, 1 by default Positive integer specifying the period for saving samples. seed : int or np.random.RandomState, optional The seed, a positive integer for random number generation. Only one seed is needed when multiple chains are used, as the other chain's seeds are generated from the first chain's to prevent dependency among random number streams. By default, seed is ``random.randint(0, MAX_UINT)``. 
algorithm : {"NUTS", "HMC", "Fixed_param"}, optional One of algorithms that are implemented in Stan such as the No-U-Turn sampler (NUTS, Hoffman and Gelman 2011), static HMC, or ``Fixed_param``. Default is NUTS. init : {0, '0', 'random', function returning dict, list of dict}, optional Specifies how initial parameter values are chosen: 0 or '0' initializes all to be zero on the unconstrained support; 'random' generates random initial values; list of size equal to the number of chains (`chains`), where the list contains a dict with initial parameter values; function returning a dict with initial parameter values. The function may take an optional argument `chain_id`. sample_file : string, optional File name specifying where samples for *all* parameters and other saved quantities will be written. If not provided, no samples will be written. If the folder given is not writable, a temporary directory will be used. When there are multiple chains, an underscore and chain number are appended to the file name. By default do not write samples to file. verbose : boolean, False by default Indicates whether intermediate output should be piped to the console. This output may be useful for debugging. control : dict, optional A dictionary of parameters to control the sampler's behavior. Default values are used if control is not specified. The following are adaptation parameters for sampling algorithms. 
These are parameters used in Stan with similar names: - `adapt_engaged` : bool, default True - `adapt_gamma` : float, positive, default 0.05 - `adapt_delta` : float, between 0 and 1, default 0.8 - `adapt_kappa` : float, between default 0.75 - `adapt_t0` : float, positive, default 10 In addition, the algorithm HMC (called 'static HMC' in Stan) and NUTS share the following parameters: - `stepsize`: float or list of floats, positive - `stepsize_jitter`: float, between 0 and 1 - `metric` : str, {"unit_e", "diag_e", "dense_e"} - `inv_metric` : np.ndarray or str In addition, depending on which algorithm is used, different parameters can be set as in Stan for sampling. For the algorithm HMC we can set - `int_time`: float, positive For algorithm NUTS, we can set - `max_treedepth` : int, positive n_jobs : int, optional Sample in parallel. If -1 all CPUs are used. If 1, no parallel computing code is used at all, which is useful for debugging. Returns ------- fit : StanFit4Model Instance containing the fitted results. Other parameters ---------------- chain_id : int or iterable of int, optional `chain_id` can be a vector to specify the chain_id for all chains or an integer. For the former case, they should be unique. For the latter, the sequence of integers starting from the given `chain_id` are used for all chains. init_r : float, optional `init_r` is only valid if `init` == "random". In this case, the initial values are simulated from [-`init_r`, `init_r`] rather than using the default interval (see the manual of Stan). test_grad: bool, optional If `test_grad` is ``True``, Stan will not do any sampling. Instead, the gradient calculation is tested and printed out and the fitted StanFit4Model object is in test gradient mode. By default, it is ``False``. append_samples`: bool, optional refresh`: int, optional Argument `refresh` can be used to control how to indicate the progress during sampling (i.e. show the progress every \code{refresh} iterations). 
By default, `refresh` is `max(iter/10, 1)`. check_hmc_diagnostics : bool, optional After sampling run `pystan.diagnostics.check_hmc_diagnostics` function. Default is `True`. Checks for n_eff and rhat skipped if the flat parameter count is higher than 1000, unless user explicitly defines ``check_hmc_diagnostics=True``. Examples -------- >>> from pystan import StanModel >>> m = StanModel(model_code='parameters {real y;} model {y ~ normal(0,1);}') >>> m.sampling(iter=100) """ # NOTE: in this function, iter masks iter() the python function. # If this ever turns out to be a problem just add: # iter_ = iter # del iter # now builtins.iter is available if diagnostic_file is not None: raise NotImplementedError("diagnostic_file not supported yet") if data is None: data = {} if warmup is None: warmup = int(iter // 2) if not all(isinstance(arg, numbers.Integral) for arg in (iter, thin, warmup)): raise ValueError('only integer values allowed as `iter`, `thin`, and `warmup`.') algorithms = ("NUTS", "HMC", "Fixed_param") # , "Metropolis") algorithm = "NUTS" if algorithm is None else algorithm if algorithm not in algorithms: raise ValueError("Algorithm must be one of {}".format(algorithms)) if algorithm=="Fixed_param": if warmup > 0: logger.warning("`warmup=0` forced with `algorithm=\"Fixed_param\"`.") warmup = 0 elif algorithm == "NUTS" and warmup == 0: if (isinstance(control, dict) and control.get("adapt_engaged", True)) or control is None: raise ValueError("Warmup samples must be greater than 0 when adaptation is enabled (`adapt_engaged=True`)") seed = pystan.misc._check_seed(seed) fit = self.fit_class(data, seed) m_pars = fit._get_param_names() p_dims = fit._get_param_dims() if isinstance(pars, string_types): pars = [pars] if pars is not None and len(pars) > 0: # Implementation note: this does not set the params_oi for the # instances of stan_fit which actually make the calls to # call_sampler. This is because we need separate instances of # stan_fit in each thread/process. 
So update_param_oi needs to # be called in every stan_fit instance. fit._update_param_oi(pars) if not all(p in m_pars for p in pars): pars = np.asarray(pars) unmatched = pars[np.invert(np.in1d(pars, m_pars))] msg = "No parameter(s): {}; sampling not done." raise ValueError(msg.format(', '.join(unmatched))) else: pars = m_pars if chains < 1: raise ValueError("The number of chains is less than one; sampling" "not done.") check_hmc_diagnostics = kwargs.pop('check_hmc_diagnostics', None) # check that arguments in kwargs are valid valid_args = {"chain_id", "init_r", "test_grad", "append_samples", "refresh", "control"} for arg in kwargs: if arg not in valid_args: raise ValueError("Parameter `{}` is not recognized.".format(arg)) args_list = pystan.misc._config_argss(chains=chains, iter=iter, warmup=warmup, thin=thin, init=init, seed=seed, sample_file=sample_file, diagnostic_file=diagnostic_file, algorithm=algorithm, control=control, **kwargs) # number of samples saved after thinning warmup2 = 1 + (warmup - 1) // thin n_kept = 1 + (iter - warmup - 1) // thin n_save = n_kept + warmup2 if n_jobs is None: n_jobs = -1 # disable multiprocessing if we only have a single chain if chains == 1: n_jobs = 1 assert len(args_list) == chains call_sampler_args = izip(itertools.repeat(data), args_list, itertools.repeat(pars)) call_sampler_star = self.module._call_sampler_star ret_and_samples = _map_parallel(call_sampler_star, call_sampler_args, n_jobs) samples = [smpl for _, smpl in ret_and_samples] # _organize_inits strips out lp__ (RStan does it in this method) inits_used = pystan.misc._organize_inits([s['inits'] for s in samples], m_pars, p_dims) random_state = np.random.RandomState(args_list[0]['seed']) perm_lst = [random_state.permutation(int(n_kept)) for _ in range(chains)] fnames_oi = fit._get_param_fnames_oi() n_flatnames = len(fnames_oi) fit.sim = {'samples': samples, # rstan has this; name clashes with 'chains' in samples[0]['chains'] 'chains': len(samples), 'iter': iter, 
'warmup': warmup, 'thin': thin, 'n_save': [n_save] * chains, 'warmup2': [warmup2] * chains, 'permutation': perm_lst, 'pars_oi': fit._get_param_names_oi(), 'dims_oi': fit._get_param_dims_oi(), 'fnames_oi': fnames_oi, 'n_flatnames': n_flatnames} fit.model_name = self.model_name fit.model_pars = m_pars fit.par_dims = p_dims fit.mode = 0 if not kwargs.get('test_grad') else 1 fit.inits = inits_used fit.stan_args = args_list fit.stanmodel = self fit.date = datetime.datetime.now() if args_list[0]["metric_file_flag"]: inv_metric_dir, _ = os.path.split(args_list[0]["metric_file"]) shutil.rmtree(inv_metric_dir, ignore_errors=True) # If problems are found in the fit, this will print diagnostic # messages. if (check_hmc_diagnostics is None and algorithm in ("NUTS", "HMC")) and fit.mode != 1: if n_flatnames > 1000: msg = "Maximum (flat) parameter count (1000) exceeded: " +\ "skipping diagnostic tests for n_eff and Rhat.\n" +\ "To run all diagnostics call pystan.check_hmc_diagnostics(fit)" logger.warning(msg) checks = ["divergence", "treedepth", "energy"] pystan.diagnostics.check_hmc_diagnostics(fit, checks=checks) # noqa else: pystan.diagnostics.check_hmc_diagnostics(fit) # noqa elif (check_hmc_diagnostics and algorithm in ("NUTS", "HMC")) and fit.mode != 1: pystan.diagnostics.check_hmc_diagnostics(fit) # noqa return fit def vb(self, data=None, pars=None, iter=10000, seed=None, init='random', sample_file=None, diagnostic_file=None, verbose=False, algorithm=None, **kwargs): """Call Stan's variational Bayes methods. Parameters ---------- data : dict A Python dictionary providing the data for the model. Variables for Stan are stored in the dictionary as expected. Variable names are the keys and the values are their associated values. Stan only accepts certain kinds of values; see Notes. pars : list of string, optional A list of strings indicating parameters of interest. By default all parameters specified in the model will be stored. 
seed : int or np.random.RandomState, optional The seed, a positive integer for random number generation. Only one seed is needed when multiple chains are used, as the other chain's seeds are generated from the first chain's to prevent dependency among random number streams. By default, seed is ``random.randint(0, MAX_UINT)``. sample_file : string, optional File name specifying where samples for *all* parameters and other saved quantities will be written. If not provided, samples will be written to a temporary file and read back in. If the folder given is not writable, a temporary directory will be used. When there are multiple chains, an underscore and chain number are appended to the file name. By default do not write samples to file. diagnostic_file : string, optional File name specifying where diagnostics for the variational fit will be written. iter : int, 10000 by default Positive integer specifying how many iterations for each chain including warmup. algorithm : {'meanfield', 'fullrank'} algorithm}{One of "meanfield" and "fullrank" indicating which variational inference algorithm is used. meanfield: mean-field approximation; fullrank: full-rank covariance. The default is 'meanfield'. verbose : boolean, False by default Indicates whether intermediate output should be piped to the console. This output may be useful for debugging. Other optional parameters, refer to the manuals for both CmdStan and Stan. - `iter`: the maximum number of iterations, defaults to 10000 - `grad_samples` the number of samples for Monte Carlo enumerate of gradients, defaults to 1. - `elbo_samples` the number of samples for Monte Carlo estimate of ELBO (objective function), defaults to 100. (ELBO stands for "the evidence lower bound".) - `eta` positive stepsize weighting parameters for variational inference but is ignored if adaptation is engaged, which is the case by default. - `adapt_engaged` flag indicating whether to automatically adapt the stepsize and defaults to True. 
- `tol_rel_obj`convergence tolerance on the relative norm of the objective, defaults to 0.01. - `eval_elbo`, evaluate ELBO every Nth iteration, defaults to 100 - `output_samples` number of posterior samples to draw and save, defaults to 1000. - `adapt_iter` number of iterations to adapt the stepsize if `adapt_engaged` is True and ignored otherwise. Returns ------- results : dict Dictionary containing information related to results. Examples -------- >>> from pystan import StanModel >>> m = StanModel(model_code='parameters {real y;} model {y ~ normal(0,1);}') >>> results = m.vb() >>> # results saved on disk in format inspired by CSV >>> print(results['args']['sample_file']) """ if data is None: data = {} algorithms = ("meanfield", "fullrank") algorithm = "meanfield" if algorithm is None else algorithm if algorithm not in algorithms: raise ValueError("Algorithm must be one of {}".format(algorithms)) seed = pystan.misc._check_seed(seed) fit = self.fit_class(data, seed) m_pars = fit._get_param_names() if isinstance(pars, string_types): pars = [pars] if pars is not None and len(pars) > 0: fit._update_param_oi(pars) if not all(p in m_pars for p in pars): pars = np.asarray(pars) unmatched = pars[np.invert(np.in1d(pars, m_pars))] msg = "No parameter(s): {}; sampling not done." 
raise ValueError(msg.format(', '.join(unmatched))) else: pars = m_pars if isinstance(init, numbers.Number): init = str(init) elif isinstance(init, Callable): init = init() elif not isinstance(init, Iterable) and \ not isinstance(init, string_types): raise ValueError("Wrong specification of initial values.") stan_args = dict(iter=iter, init=init, chain_id=1, seed=seed, method="variational", algorithm=algorithm) if sample_file is not None: stan_args['sample_file'] = pystan.misc._writable_sample_file(sample_file) else: stan_args['sample_file'] = os.path.join(tempfile.mkdtemp(), 'output.csv') if diagnostic_file is not None: stan_args['diagnostic_file'] = diagnostic_file # check that arguments in kwargs are valid valid_args = {'elbo_samples', 'eta', 'adapt_engaged', 'eval_elbo', 'grad_samples', 'output_samples', 'adapt_iter', 'tol_rel_obj'} for arg in kwargs: if arg not in valid_args: raise ValueError("Parameter `{}` is not recognized.".format(arg)) stan_args.update(kwargs) stan_args = pystan.misc._get_valid_stan_args(stan_args) ret, sample = fit._call_sampler(stan_args, pars_oi=pars) logger.warning('Automatic Differentiation Variational Inference (ADVI) is an EXPERIMENTAL ALGORITHM.') logger.warning('ADVI samples may be found on the filesystem in the file `{}`'.format(sample.args['sample_file'].decode('utf8'))) return OrderedDict([('args', sample.args), ('inits', sample.inits), ('sampler_params', sample.sampler_params), ('sampler_param_names', sample.sampler_param_names), ('mean_pars', sample.mean_pars), ('mean_par_names', sample.mean_par_names)])
43,933
Python
.py
874
38.479405
248
0.595576
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,999
misc.py
stan-dev_pystan2/pystan/misc.py
"""PyStan utility functions These functions validate and organize data passed to and from the classes and functions defined in the file `stan_fit.hpp` and wrapped by the Cython file `stan_fit.pxd`. """ #----------------------------------------------------------------------------- # Copyright (c) 2013-2015, PyStan developers # # This file is licensed under Version 3.0 of the GNU General Public # License. See LICENSE for a text of the license. #----------------------------------------------------------------------------- # REF: rstan/rstan/R/misc.R from __future__ import unicode_literals, division from pystan._compat import PY2, string_types from collections import OrderedDict if PY2: from collections import Callable, Iterable, Sequence else: from collections.abc import Callable, Iterable, Sequence import inspect import io import itertools import logging import math from numbers import Number import os import random import re import sys import shutil import tempfile import time import numpy as np try: from scipy.stats.mstats import mquantiles except ImportError: from pystan.external.scipy.mstats import mquantiles import pystan.chains import pystan._misc from pystan.constants import (MAX_UINT, sampling_algo_t, optim_algo_t, variational_algo_t, sampling_metric_t, stan_args_method_t) logger = logging.getLogger('pystan') def stansummary(fit, pars=None, probs=(0.025, 0.25, 0.5, 0.75, 0.975), digits_summary=2): """ Summary statistic table. Parameters ---------- fit : StanFit4Model object pars : str or sequence of str, optional Parameter names. By default use all parameters probs : sequence of float, optional Quantiles. By default, (0.025, 0.25, 0.5, 0.75, 0.975) digits_summary : int, optional Number of significant digits. By default, 2 Returns ------- summary : string Table includes mean, se_mean, sd, probs_0, ..., probs_n, n_eff and Rhat. 
Examples -------- >>> model_code = 'parameters {real y;} model {y ~ normal(0,1);}' >>> m = StanModel(model_code=model_code, model_name="example_model") >>> fit = m.sampling() >>> print(stansummary(fit)) Inference for Stan model: example_model. 4 chains, each with iter=2000; warmup=1000; thin=1; post-warmup draws per chain=1000, total post-warmup draws=4000. mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat y 0.01 0.03 1.0 -2.01 -0.68 0.02 0.72 1.97 1330 1.0 lp__ -0.5 0.02 0.68 -2.44 -0.66 -0.24 -0.05-5.5e-4 1555 1.0 Samples were drawn using NUTS at Thu Aug 17 00:52:25 2017. For each parameter, n_eff is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat=1). """ if fit.mode == 1: return "Stan model '{}' is of mode 'test_grad';\n"\ "sampling is not conducted.".format(fit.model_name) elif fit.mode == 2: return "Stan model '{}' does not contain samples.".format(fit.model_name) n_kept = [s - w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])] header = "Inference for Stan model: {}.\n".format(fit.model_name) header += "{} chains, each with iter={}; warmup={}; thin={}; \n" header = header.format(fit.sim['chains'], fit.sim['iter'], fit.sim['warmup'], fit.sim['thin'], sum(n_kept)) header += "post-warmup draws per chain={}, total post-warmup draws={}.\n\n" header = header.format(n_kept[0], sum(n_kept)) footer = "\n\nSamples were drawn using {} at {}.\n"\ "For each parameter, n_eff is a crude measure of effective sample size,\n"\ "and Rhat is the potential scale reduction factor on split chains (at \n"\ "convergence, Rhat=1)." 
sampler = fit.sim['samples'][0]['args']['sampler_t'] date = fit.date.strftime('%c') # %c is locale's representation footer = footer.format(sampler, date) s = _summary(fit, pars, probs) body = _array_to_table(s['summary'], s['summary_rownames'], s['summary_colnames'], digits_summary) return header + body + footer def _print_stanfit(fit, pars=None, probs=(0.025, 0.25, 0.5, 0.75, 0.975), digits_summary=2): # warning added in PyStan 2.17.0 logger.warning('Function `_print_stanfit` is deprecated and will be removed in a future version. '\ 'Use `stansummary` instead.', DeprecationWarning) return stansummary(fit, pars=pars, probs=probs, digits_summary=digits_summary) def _array_to_table(arr, rownames, colnames, n_digits): """Print an array with row and column names Example: mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat beta[1,1] 0.0 0.0 1.0 -2.0 -0.7 0.0 0.7 2.0 4000 1 beta[1,2] 0.0 0.0 1.0 -2.1 -0.7 0.0 0.7 2.0 4000 1 beta[2,1] 0.0 0.0 1.0 -2.0 -0.7 0.0 0.7 2.0 4000 1 beta[2,2] 0.0 0.0 1.0 -1.9 -0.6 0.0 0.7 2.0 4000 1 lp__ -4.2 0.1 2.1 -9.4 -5.4 -3.8 -2.7 -1.2 317 1 """ assert arr.shape == (len(rownames), len(colnames)) rownames_maxwidth = max(len(n) for n in rownames) max_col_width = 7 min_col_width = 5 max_col_header_num_width = [max(max_col_width, max(len(n) + 1, min_col_width)) for n in colnames] rows = [] for row in arr: row_nums = [] for j, (num, width) in enumerate(zip(row, max_col_header_num_width)): if colnames[j] == "n_eff": num = int(round(num, 0)) if not np.isnan(num) else num num = _format_number(num, n_digits, max_col_width - 1) row_nums.append(num) if len(num) + 1 > max_col_header_num_width[j]: max_col_header_num_width[j] = len(num) + 1 rows.append(row_nums) widths = [rownames_maxwidth] + max_col_header_num_width header = '{:>{width}}'.format('', width=widths[0]) for name, width in zip(colnames, widths[1:]): header += '{name:>{width}}'.format(name=name, width=width) lines = [header] for rowname, row in zip(rownames, rows): line = 
'{name:{width}}'.format(name=rowname, width=widths[0]) for j, (num, width) in enumerate(zip(row, widths[1:])): line += '{num:>{width}}'.format(num=num, width=width) lines.append(line) return '\n'.join(lines) def _number_width(n): """Calculate the width in characters required to print a number For example, -1024 takes 5 characters. -0.034 takes 6 characters. """ return len(str(n)) def _format_number_si(num, n_signif_figures): """Format a number using scientific notation to given significant figures""" if math.isnan(num) or math.isinf(num): return str(num) leading, exp = '{:E}'.format(num).split('E') leading = round(float(leading), n_signif_figures - 1) exp = exp[:1] + exp[2:] if exp[1] == '0' else exp formatted = '{}e{}'.format(leading, exp.lstrip('+')) return formatted def _format_number(num, n_signif_figures, max_width): """Format a number as a string while obeying space constraints. `n_signif_figures` is the minimum number of significant figures expressed `max_width` is the maximum width in characters allowed """ if max_width < 6: raise NotImplementedError("Guaranteed formatting in fewer than 6 characters not supported.") if math.isnan(num) or math.isinf(num): return str(num) # add 0.5 to prevent log(0) errors; only affects n_digits calculation for num > 0 n_digits = lambda num: math.floor(math.log10(abs(num) + 0.5)) + 1 if abs(num) > 10**-n_signif_figures and n_digits(num) <= max_width - n_signif_figures: return str(round(num, n_signif_figures))[:max_width].rstrip('.') elif _number_width(num) <= max_width: if n_digits(num) >= n_signif_figures: # the int() is necessary for consistency between Python 2 and 3 return str(int(round(num))) else: return str(num) else: return _format_number_si(num, n_signif_figures) def _summary(fit, pars=None, probs=None, **kwargs): """Summarize samples (compute mean, SD, quantiles) in all chains. 
REF: stanfit-class.R summary method Parameters ---------- fit : StanFit4Model object pars : str or sequence of str, optional Parameter names. By default use all parameters probs : sequence of float, optional Quantiles. By default, (0.025, 0.25, 0.5, 0.75, 0.975) Returns ------- summaries : OrderedDict of array Array indexed by 'summary' has dimensions (num_params, num_statistics). Parameters are unraveled in *row-major order*. Statistics include: mean, se_mean, sd, probs_0, ..., probs_n, n_eff, and Rhat. Array indexed by 'c_summary' breaks down the statistics by chain and has dimensions (num_params, num_statistics_c_summary, num_chains). Statistics for `c_summary` are the same as for `summary` with the exception that se_mean, n_eff, and Rhat are absent. Row names and column names are also included in the OrderedDict. """ if fit.mode == 1: msg = "Stan model {} is of mode 'test_grad'; sampling is not conducted." msg = msg.format(fit.model_name) raise ValueError(msg) elif fit.mode == 2: msg = "Stan model {} contains no samples.".format(fit.model_name) raise ValueError(msg) if fit.sim['n_save'] == fit.sim['warmup2']: msg = "Stan model {} contains no samples.".format(fit.model_name) raise ValueError(msg) # rstan checks for cached summaries here if pars is None: pars = fit.sim['pars_oi'] elif isinstance(pars, string_types): pars = [pars] pars = _remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi']) if probs is None: probs = (0.025, 0.25, 0.5, 0.75, 0.975) ss = _summary_sim(fit.sim, pars, probs) # TODO: include sem, ess and rhat: ss['ess'], ss['rhat'] s1 = np.column_stack([ss['msd'][:, 0], ss['sem'], ss['msd'][:, 1], ss['quan'], ss['ess'], ss['rhat']]) s1_rownames = ss['c_msd_names']['parameters'] s1_colnames = ((ss['c_msd_names']['stats'][0],) + ('se_mean',) + (ss['c_msd_names']['stats'][1],) + ss['c_quan_names']['stats'] + ('n_eff', 'Rhat')) s2 = _combine_msd_quan(ss['c_msd'], ss['c_quan']) s2_rownames = ss['c_msd_names']['parameters'] s2_colnames = 
ss['c_msd_names']['stats'] + ss['c_quan_names']['stats'] return OrderedDict(summary=s1, c_summary=s2, summary_rownames=s1_rownames, summary_colnames=s1_colnames, c_summary_rownames=s2_rownames, c_summary_colnames=s2_colnames) def _combine_msd_quan(msd, quan): """Combine msd and quantiles in chain summary Parameters ---------- msd : array of shape (num_params, 2, num_chains) mean and sd for chains cquan : array of shape (num_params, num_quan, num_chains) quantiles for chains Returns ------- msdquan : array of shape (num_params, 2 + num_quan, num_chains) """ dim1 = msd.shape n_par, _, n_chains = dim1 ll = [] for i in range(n_chains): a1 = msd[:, :, i] a2 = quan[:, :, i] ll.append(np.column_stack([a1, a2])) msdquan = np.dstack(ll) return msdquan def _summary_sim(sim, pars, probs): """Summarize chains together and separately REF: rstan/rstan/R/misc.R Parameters are unraveled in *column-major order*. Parameters ---------- sim : dict dict from from a stanfit fit object, i.e., fit['sim'] pars : Iterable of str parameter names probs : Iterable of probs desired quantiles Returns ------- summaries : OrderedDict of array This dictionary contains the following arrays indexed by the keys given below: - 'msd' : array of shape (num_params, 2) with mean and sd - 'sem' : array of length num_params with standard error for the mean - 'c_msd' : array of shape (num_params, 2, num_chains) - 'quan' : array of shape (num_params, num_quan) - 'c_quan' : array of shape (num_params, num_quan, num_chains) - 'ess' : array of shape (num_params, 1) - 'rhat' : array of shape (num_params, 1) Note ---- `_summary_sim` has the parameters in *column-major* order whereas `_summary` gives them in *row-major* order. (This follows RStan.) """ # NOTE: this follows RStan rather closely. 
Some of the calculations here probs_len = len(probs) n_chains = len(sim['samples']) # tidx is a dict with keys that are parameters and values that are their # indices using column-major ordering tidx = _pars_total_indexes(sim['pars_oi'], sim['dims_oi'], sim['fnames_oi'], pars) tidx_colm = [tidx[par] for par in pars] tidx_colm = list(itertools.chain(*tidx_colm)) # like R's unlist() tidx_rowm = [tidx[par+'_rowmajor'] for par in pars] tidx_rowm = list(itertools.chain(*tidx_rowm)) tidx_len = len(tidx_colm) lmsdq = [_get_par_summary(sim, i, probs) for i in tidx_colm] msd = np.row_stack([x['msd'] for x in lmsdq]) quan = np.row_stack([x['quan'] for x in lmsdq]) probs_str = tuple(["{:g}%".format(100*p) for p in probs]) msd = msd.reshape(tidx_len, 2, order='F') quan = quan.reshape(tidx_len, probs_len, order='F') c_msd = np.row_stack([x['c_msd'] for x in lmsdq]) c_quan = np.row_stack([x['c_quan'] for x in lmsdq]) c_msd = c_msd.reshape(tidx_len, 2, n_chains, order='F') c_quan = c_quan.reshape(tidx_len, probs_len, n_chains, order='F') sim_attr_args = sim.get('args', None) if sim_attr_args is None: cids = list(range(n_chains)) else: cids = [x['chain_id'] for x in sim_attr_args] c_msd_names = dict(parameters=np.asarray(sim['fnames_oi'])[tidx_colm], stats=("mean", "sd"), chains=tuple("chain:{}".format(cid) for cid in cids)) c_quan_names = dict(parameters=np.asarray(sim['fnames_oi'])[tidx_colm], stats=probs_str, chains=tuple("chain:{}".format(cid) for cid in cids)) ess_and_rhat = np.array([pystan.chains.ess_and_splitrhat(sim, n) for n in tidx_colm]) ess, rhat = [arr.ravel() for arr in np.hsplit(ess_and_rhat, 2)] return dict(msd=msd, c_msd=c_msd, c_msd_names=c_msd_names, quan=quan, c_quan=c_quan, c_quan_names=c_quan_names, sem=msd[:, 1] / np.sqrt(ess), ess=ess, rhat=rhat, row_major_idx=tidx_rowm, col_major_idx=tidx_colm) def _get_par_summary(sim, n, probs): """Summarize chains merged and individually Parameters ---------- sim : dict from stanfit object n : int parameter index probs 
: iterable of int quantiles Returns ------- summary : dict Dictionary containing summaries """ # _get_samples gets chains for nth parameter ss = _get_samples(n, sim, inc_warmup=False) msdfun = lambda chain: (np.mean(chain), np.std(chain, ddof=1)) qfun = lambda chain: mquantiles(chain, probs) c_msd = np.array([msdfun(s) for s in ss]).flatten() c_quan = np.array([qfun(s) for s in ss]).flatten() ass = np.asarray(ss).flatten() msd = np.asarray(msdfun(ass)) quan = qfun(np.asarray(ass)) return dict(msd=msd, quan=quan, c_msd=c_msd, c_quan=c_quan) def _split_data(data): data_r = {} data_i = {} # data_r and data_i are going to be converted into C++ objects of # type: map<string, pair<vector<double>, vector<size_t>>> and # map<string, pair<vector<int>, vector<size_t>>> so prepare # them accordingly. for k, v in data.items(): if np.issubdtype(np.asarray(v).dtype, np.integer): data_i.update({k.encode('utf-8'): np.asarray(v, dtype=int)}) elif np.issubdtype(np.asarray(v).dtype, np.floating): data_r.update({k.encode('utf-8'): np.asarray(v, dtype=float)}) else: msg = "Variable {} is neither int nor float nor list/array thereof" raise ValueError(msg.format(k)) return data_r, data_i def _config_argss(chains, iter, warmup, thin, init, seed, sample_file, diagnostic_file, algorithm, control, **kwargs): # After rstan/rstan/R/misc.R (config_argss) iter = int(iter) if iter < 1: raise ValueError("`iter` should be a positive integer.") thin = int(thin) if thin < 1 or thin > iter: raise ValueError("`thin should be a positive integer " "less than `iter`.") warmup = max(0, int(warmup)) if warmup > iter: raise ValueError("`warmup` should be an integer less than `iter`.") chains = int(chains) if chains < 1: raise ValueError("`chains` should be a positive integer.") iters = [iter] * chains thins = [thin] * chains warmups = [warmup] * chains # use chain_id argument if specified if kwargs.get('chain_id') is None: chain_id = list(range(chains)) else: chain_id = [int(id) for id in kwargs['chain_id']] 
if len(set(chain_id)) != len(chain_id): raise ValueError("`chain_id` has duplicated elements.") chain_id_len = len(chain_id) if chain_id_len >= chains: chain_id = chain_id else: chain_id = chain_id + [max(chain_id) + 1 + i for i in range(chains - chain_id_len)] del kwargs['chain_id'] inits_specified = False # slight difference here from rstan; Python's lists are not typed. if isinstance(init, Number): init = str(init) if isinstance(init, string_types): if init in ['0', 'random']: inits = [init] * chains else: inits = ["random"] * chains inits_specified = True if not inits_specified and isinstance(init, Callable): ## test if function takes argument named "chain_id" if "chain_id" in ( inspect.getfullargspec(init).args if hasattr(inspect, "getfullargspec") else inspect.getargspec(init).args ): inits = [init(chain_id=id) for id in chain_id] else: inits = [init()] * chains if not isinstance(inits[0], dict): raise ValueError("The function specifying initial values must " "return a dictionary.") inits_specified = True if not inits_specified and isinstance(init, Sequence): if len(init) != chains: raise ValueError("Length of list of initial values does not " "match number of chains.") if not all([isinstance(d, dict) for d in init]): raise ValueError("Initial value list is not a sequence of " "dictionaries.") inits = init inits_specified = True if not inits_specified: raise ValueError("Invalid specification of initial values.") ## only one seed is needed by virtue of the RNG seed = _check_seed(seed) kwargs['method'] = "test_grad" if kwargs.get('test_grad') else 'sampling' all_control = { "adapt_engaged", "adapt_gamma", "adapt_delta", "adapt_kappa", "adapt_t0", "adapt_init_buffer", "adapt_term_buffer", "adapt_window", "stepsize", "stepsize_jitter", "metric", "int_time", "max_treedepth", "epsilon", "error", "inv_metric" } all_metrics = {"unit_e", "diag_e", "dense_e"} if control is not None: if not isinstance(control, dict): raise ValueError("`control` must be a dictionary") if 
not all(key in all_control for key in control): unknown = set(control) - all_control raise ValueError("`control` contains unknown parameters: {}".format(unknown)) if control.get('metric') and control['metric'] not in all_metrics: raise ValueError("`metric` must be one of {}".format(all_metrics)) kwargs['control'] = control argss = [dict() for _ in range(chains)] for i in range(chains): argss[i] = dict(chain_id=chain_id[i], iter=iters[i], thin=thins[i], seed=seed, warmup=warmups[i], init=inits[i], algorithm=algorithm) if sample_file is not None: sample_file = _writable_sample_file(sample_file) if chains == 1: argss[0]['sample_file'] = sample_file elif chains > 1: for i in range(chains): argss[i]['sample_file'] = _append_id(sample_file, i) if diagnostic_file is not None: raise NotImplementedError("diagnostic_file not implemented yet.") if control is not None and "inv_metric" in control: inv_metric = control.pop("inv_metric") metric_dir = tempfile.mkdtemp() if isinstance(inv_metric, dict): for i in range(chains): if i not in inv_metric: msg = "Invalid value for init_inv_metric found (keys={}). " \ "Use either a dictionary with chain_index as keys (0,1,2,...)" \ "or ndarray." 
msg = msg.format(list(metric_file.keys())) raise ValueError(msg) mass_values = inv_metric[i] metric_filename = "inv_metric_chain_{}.Rdata".format(str(i)) metric_path = os.path.join(metric_dir, metric_filename) if isinstance(mass_values, str): if not os.path.exists(mass_values): raise ValueError("inverse metric file was not found: {}".format(mass_values)) shutil.copy(mass_values, metric_path) else: stan_rdump(dict(inv_metric=mass_values), metric_path) argss[i]['metric_file'] = metric_path elif isinstance(inv_metric, str): if not os.path.exists(inv_metric): raise ValueError("inverse metric file was not found: {}".format(inv_metric)) for i in range(chains): metric_filename = "inv_metric_chain_{}.Rdata".format(str(i)) metric_path = os.path.join(metric_dir, metric_filename) shutil.copy(inv_metric, metric_path) argss[i]['metric_file'] = metric_path elif isinstance(inv_metric, Iterable): metric_filename = "inv_metric_chain_0.Rdata" metric_path = os.path.join(metric_dir, metric_filename) stan_rdump(dict(inv_metric=inv_metric), metric_path) argss[0]['metric_file'] = metric_path for i in range(1, chains): metric_filename = "inv_metric_chain_{}.Rdata".format(str(i)) metric_path = os.path.join(metric_dir, metric_filename) shutil.copy(argss[i-1]['metric_file'], metric_path) argss[i]['metric_file'] = metric_path else: argss[i]['metric_file'] = "" stepsize_list = None if "control" in kwargs and "stepsize" in kwargs["control"]: if isinstance(kwargs["control"]["stepsize"], Sequence): stepsize_list = kwargs["control"]["stepsize"] if len(kwargs["control"]["stepsize"]) == 1: kwargs["control"]["stepsize"] = kwargs["control"]["stepsize"][0] elif len(kwargs["control"]["stepsize"]) != chains: raise ValueError("stepsize length needs to equal chain count.") else: stepsize_list = kwargs["control"]["stepsize"] for i in range(chains): argss[i].update(kwargs) if stepsize_list is not None: argss[i]["control"]["stepsize"] = stepsize_list[i] argss[i] = _get_valid_stan_args(argss[i]) return argss 
def _get_valid_stan_args(base_args=None): """Fill in default values for arguments not provided in `base_args`. RStan does this in C++ in stan_args.hpp in the stan_args constructor. It seems easier to deal with here in Python. """ args = base_args.copy() if base_args is not None else {} # Default arguments, c.f. rstan/rstan/inst/include/rstan/stan_args.hpp # values in args are going to be converted into C++ objects so # prepare them accordingly---e.g., unicode -> bytes -> std::string args['chain_id'] = args.get('chain_id', 1) args['append_samples'] = args.get('append_samples', False) if args.get('method') is None or args['method'] == "sampling": args['method'] = stan_args_method_t.SAMPLING elif args['method'] == "optim": args['method'] = stan_args_method_t.OPTIM elif args['method'] == 'test_grad': args['method'] = stan_args_method_t.TEST_GRADIENT elif args['method'] == 'variational': args['method'] = stan_args_method_t.VARIATIONAL else: args['method'] = stan_args_method_t.SAMPLING args['sample_file_flag'] = True if args.get('sample_file') else False args['sample_file'] = args.get('sample_file', '').encode('ascii') args['diagnostic_file_flag'] = True if args.get('diagnostic_file') else False args['diagnostic_file'] = args.get('diagnostic_file', '').encode('ascii') # NB: argument named "seed" not "random_seed" args['random_seed'] = args.get('seed', int(time.time())) args['metric_file_flag'] = True if args.get('metric_file') else False args['metric_file'] = args.get('metric_file', '').encode('ascii') if args['method'] == stan_args_method_t.VARIATIONAL: # variational does not use a `control` map like sampling args['ctrl'] = args.get('ctrl', dict(variational=dict())) args['ctrl']['variational']['iter'] = args.get('iter', 10000) args['ctrl']['variational']['grad_samples'] = args.get('grad_samples', 1) args['ctrl']['variational']['elbo_samples'] = args.get('elbo_samples', 100) args['ctrl']['variational']['eval_elbo'] = args.get('eval_elbo', 100) 
args['ctrl']['variational']['output_samples'] = args.get('output_samples', 1000) args['ctrl']['variational']['adapt_iter'] = args.get('adapt_iter', 50) args['ctrl']['variational']['eta'] = args.get('eta', 1.0) args['ctrl']['variational']['adapt_engaged'] = args.get('adapt_engaged', True) args['ctrl']['variational']['tol_rel_obj'] = args.get('tol_rel_obj', 0.01) if args.get('algorithm', '').lower() == 'fullrank': args['ctrl']['variational']['algorithm'] = variational_algo_t.FULLRANK else: args['ctrl']['variational']['algorithm'] = variational_algo_t.MEANFIELD elif args['method'] == stan_args_method_t.SAMPLING: args['ctrl'] = args.get('ctrl', dict(sampling=dict())) args['ctrl']['sampling']['iter'] = iter = args.get('iter', 2000) args['ctrl']['sampling']['warmup'] = warmup = args.get('warmup', iter // 2) calculated_thin = iter - warmup // 1000 if calculated_thin < 1: calculated_thin = 1 args['ctrl']['sampling']['thin'] = thin = args.get('thin', calculated_thin) args['ctrl']['sampling']['save_warmup'] = True # always True now args['ctrl']['sampling']['iter_save_wo_warmup'] = iter_save_wo_warmup = 1 + (iter - warmup - 1) // thin args['ctrl']['sampling']['iter_save'] = iter_save_wo_warmup + 1 + (warmup - 1) // thin refresh = iter // 10 if iter >= 20 else 1 args['ctrl']['sampling']['refresh'] = args.get('refresh', refresh) ctrl_lst = args.get('control', dict()) ctrl_sampling = args['ctrl']['sampling'] # NB: if these defaults change, remember to update docstrings ctrl_sampling['adapt_engaged'] = ctrl_lst.get("adapt_engaged", True) ctrl_sampling['adapt_gamma'] = ctrl_lst.get("adapt_gamma", 0.05) ctrl_sampling['adapt_delta'] = ctrl_lst.get("adapt_delta", 0.8) ctrl_sampling['adapt_kappa'] = ctrl_lst.get("adapt_kappa", 0.75) ctrl_sampling['adapt_t0'] = ctrl_lst.get("adapt_t0", 10.0) ctrl_sampling['adapt_init_buffer'] = ctrl_lst.get("adapt_init_buffer", 75) ctrl_sampling['adapt_term_buffer'] = ctrl_lst.get("adapt_term_buffer", 50) ctrl_sampling['adapt_window'] = 
ctrl_lst.get("adapt_window", 25) ctrl_sampling['stepsize'] = ctrl_lst.get("stepsize", 1.0) ctrl_sampling['stepsize_jitter'] = ctrl_lst.get("stepsize_jitter", 0.0) algorithm = args.get('algorithm', 'NUTS') if algorithm == 'HMC': args['ctrl']['sampling']['algorithm'] = sampling_algo_t.HMC elif algorithm == 'Metropolis': args['ctrl']['sampling']['algorithm'] = sampling_algo_t.Metropolis elif algorithm == 'NUTS': args['ctrl']['sampling']['algorithm'] = sampling_algo_t.NUTS elif algorithm == 'Fixed_param': args['ctrl']['sampling']['algorithm'] = sampling_algo_t.Fixed_param # TODO: Setting adapt_engaged to False solves the segfault reported # in issue #200; find out why this hack is needed. RStan deals with # the setting elsewhere. ctrl_sampling['adapt_engaged'] = False else: msg = "Invalid value for parameter algorithm (found {}; " \ "require HMC, Metropolis, NUTS, or Fixed_param).".format(algorithm) raise ValueError(msg) metric = ctrl_lst.get('metric', 'diag_e') if metric == "unit_e": ctrl_sampling['metric'] = sampling_metric_t.UNIT_E elif metric == "diag_e": ctrl_sampling['metric'] = sampling_metric_t.DIAG_E elif metric == "dense_e": ctrl_sampling['metric'] = sampling_metric_t.DENSE_E if ctrl_sampling['algorithm'] == sampling_algo_t.NUTS: ctrl_sampling['max_treedepth'] = ctrl_lst.get("max_treedepth", 10) elif ctrl_sampling['algorithm'] == sampling_algo_t.HMC: ctrl_sampling['int_time'] = ctrl_lst.get('int_time', 6.283185307179586476925286766559005768e+00) elif ctrl_sampling['algorithm'] == sampling_algo_t.Metropolis: pass elif ctrl_sampling['algorithm'] == sampling_algo_t.Fixed_param: pass elif args['method'] == stan_args_method_t.OPTIM: args['ctrl'] = args.get('ctrl', dict(optim=dict())) args['ctrl']['optim']['iter'] = iter = args.get('iter', 2000) algorithm = args.get('algorithm', 'LBFGS') if algorithm == "BFGS": args['ctrl']['optim']['algorithm'] = optim_algo_t.BFGS elif algorithm == "Newton": args['ctrl']['optim']['algorithm'] = optim_algo_t.Newton elif algorithm 
== "LBFGS": args['ctrl']['optim']['algorithm'] = optim_algo_t.LBFGS else: msg = "Invalid value for parameter algorithm (found {}; " \ "require (L)BFGS or Newton).".format(algorithm) raise ValueError(msg) refresh = args['ctrl']['optim']['iter'] // 100 args['ctrl']['optim']['refresh'] = args.get('refresh', refresh) if args['ctrl']['optim']['refresh'] < 1: args['ctrl']['optim']['refresh'] = 1 args['ctrl']['optim']['init_alpha'] = args.get("init_alpha", 0.001) args['ctrl']['optim']['tol_obj'] = args.get("tol_obj", 1e-12) args['ctrl']['optim']['tol_grad'] = args.get("tol_grad", 1e-8) args['ctrl']['optim']['tol_param'] = args.get("tol_param", 1e-8) args['ctrl']['optim']['tol_rel_obj'] = args.get("tol_rel_obj", 1e4) args['ctrl']['optim']['tol_rel_grad'] = args.get("tol_rel_grad", 1e7) args['ctrl']['optim']['save_iterations'] = args.get("save_iterations", True) args['ctrl']['optim']['history_size'] = args.get("history_size", 5) elif args['method'] == stan_args_method_t.TEST_GRADIENT: args['ctrl'] = args.get('ctrl', dict(test_grad=dict())) args['ctrl']['test_grad']['epsilon'] = args.get("epsilon", 1e-6) args['ctrl']['test_grad']['error'] = args.get("error", 1e-6) init = args.get('init', "random") if isinstance(init, string_types): args['init'] = init.encode('ascii') elif isinstance(init, dict): args['init'] = "user".encode('ascii') # while the name is 'init_list', it is a dict; the name comes from rstan, # where list elements can have names args['init_list'] = init else: args['init'] = "random".encode('ascii') args['init_radius'] = args.get('init_r', 2.0) if (args['init_radius'] <= 0): args['init'] = b"0" # 0 initialization requires init_radius = 0 if (args['init'] == b"0" or args['init'] == 0): args['init_radius'] = 0.0 args['enable_random_init'] = args.get('enable_random_init', True) # RStan calls validate_args() here return args def _check_seed(seed): """If possible, convert `seed` into a valid form for Stan (an integer between 0 and MAX_UINT, inclusive). 
If not possible, use a random seed instead and raise a warning if `seed` was not provided as `None`. """ if isinstance(seed, (Number, string_types)): try: seed = int(seed) except ValueError: logger.warning("`seed` must be castable to an integer") seed = None else: if seed < 0: logger.warning("`seed` may not be negative") seed = None elif seed > MAX_UINT: raise ValueError('`seed` is too large; max is {}'.format(MAX_UINT)) elif isinstance(seed, np.random.RandomState): seed = seed.randint(0, MAX_UINT) elif seed is not None: logger.warning('`seed` has unexpected type') seed = None if seed is None: seed = random.randint(0, MAX_UINT) return seed def _organize_inits(inits, pars, dims): """Obtain a list of initial values for each chain. The parameter 'lp__' will be removed from the chains. Parameters ---------- inits : list list of initial values for each chain. pars : list of str dims : list of list of int from (via cython conversion) vector[vector[uint]] dims Returns ------- inits : list of dict """ try: idx_of_lp = pars.index('lp__') del pars[idx_of_lp] del dims[idx_of_lp] except ValueError: pass starts = _calc_starts(dims) return [_par_vector2dict(init, pars, dims, starts) for init in inits] def _calc_starts(dims): """Calculate starting indexes Parameters ---------- dims : list of list of int from (via cython conversion) vector[vector[uint]] dims Examples -------- >>> _calc_starts([[8, 2], [5], [6, 2]]) [0, 16, 21] """ # NB: Python uses 0-indexing; R uses 1-indexing. l = len(dims) s = [np.prod(d) for d in dims] starts = np.cumsum([0] + s)[0:l].tolist() # coerce things into ints before returning return [int(i) for i in starts] def _par_vector2dict(v, pars, dims, starts=None): """Turn a vector of samples into an OrderedDict according to param dims. 
Parameters ---------- y : list of int or float pars : list of str parameter names dims : list of list of int list of dimensions of parameters Returns ------- d : dict Examples -------- >>> v = list(range(31)) >>> dims = [[5], [5, 5], []] >>> pars = ['mu', 'Phi', 'eta'] >>> _par_vector2dict(v, pars, dims) # doctest: +ELLIPSIS OrderedDict([('mu', array([0, 1, 2, 3, 4])), ('Phi', array([[ 5, ... """ if starts is None: starts = _calc_starts(dims) d = OrderedDict() for i in range(len(pars)): l = int(np.prod(dims[i])) start = starts[i] end = start + l y = np.asarray(v[start:end]) if len(dims[i]) > 1: y = y.reshape(dims[i], order='F') # 'F' = Fortran, column-major d[pars[i]] = y.squeeze() if y.shape == (1,) else y return d def _check_pars(allpars, pars): if len(pars) == 0: raise ValueError("No parameter specified (`pars` is empty).") for par in pars: if par not in allpars: raise ValueError("No parameter {}".format(par)) def _pars_total_indexes(names, dims, fnames, pars): """Obtain all the indexes for parameters `pars` in the sequence of names. `names` references variables that are in column-major order Parameters ---------- names : sequence of str All the parameter names. dim : sequence of list of int Dimensions, in same order as `names`. fnames : sequence of str All the scalar parameter names pars : sequence of str The parameters of interest. It is assumed all elements in `pars` are in `names`. Returns ------- indexes : OrderedDict of list of int Dictionary uses parameter names as keys. Indexes are column-major order. For each parameter there is also a key `par`+'_rowmajor' that stores the row-major indexing. Note ---- Inside each parameter (vector or array), the sequence uses column-major ordering. For example, if we have parameters alpha and beta, having dimensions [2, 2] and [2, 3] respectively, the whole parameter sequence is alpha[0,0], alpha[1,0], alpha[0, 1], alpha[1, 1], beta[0, 0], beta[1, 0], beta[0, 1], beta[1, 1], beta[0, 2], beta[1, 2]. 
In short, like R matrix(..., bycol=TRUE). Example ------- >>> pars_oi = ['mu', 'tau', 'eta', 'theta', 'lp__'] >>> dims_oi = [[], [], [8], [8], []] >>> fnames_oi = ['mu', 'tau', 'eta[1]', 'eta[2]', 'eta[3]', 'eta[4]', ... 'eta[5]', 'eta[6]', 'eta[7]', 'eta[8]', 'theta[1]', 'theta[2]', ... 'theta[3]', 'theta[4]', 'theta[5]', 'theta[6]', 'theta[7]', ... 'theta[8]', 'lp__'] >>> pars = ['mu', 'tau', 'eta', 'theta', 'lp__'] >>> _pars_total_indexes(pars_oi, dims_oi, fnames_oi, pars) ... # doctest: +ELLIPSIS OrderedDict([('mu', (0,)), ('tau', (1,)), ('eta', (2, 3, ... """ starts = _calc_starts(dims) def par_total_indexes(par): # if `par` is a scalar, it will match one of `fnames` if par in fnames: p = fnames.index(par) idx = tuple([p]) return OrderedDict([(par, idx), (par+'_rowmajor', idx)]) else: p = names.index(par) idx = starts[p] + np.arange(np.prod(dims[p])) idx_rowmajor = starts[p] + _idx_col2rowm(dims[p]) return OrderedDict([(par, tuple(idx)), (par+'_rowmajor', tuple(idx_rowmajor))]) indexes = OrderedDict() for par in pars: indexes.update(par_total_indexes(par)) return indexes def _idx_col2rowm(d): """Generate indexes to change from col-major to row-major ordering""" if 0 == len(d): return 1 if 1 == len(d): return np.arange(d[0]) # order='F' indicates column-major ordering idx = np.array(np.arange(np.prod(d))).reshape(d, order='F').T return idx.flatten(order='F') def _get_kept_samples(n, sim): """Get samples to be kept from the chain(s) for `n`th parameter. Samples from different chains are merged. Parameters ---------- n : int sim : dict A dictionary tied to a StanFit4Model instance. Returns ------- samples : array Samples being kept, permuted and in column-major order. """ return pystan._misc.get_kept_samples(n, sim) def _get_samples(n, sim, inc_warmup=True): # NOTE: this is in stanfit-class.R in RStan (rather than misc.R) """Get chains for `n`th parameter. Parameters ---------- n : int sim : dict A dictionary tied to a StanFit4Model instance. 
Returns ------- chains : list of array Each chain is an element in the list. """ return pystan._misc.get_samples(n, sim, inc_warmup) def _redirect_stderr(): """Redirect stderr for subprocesses to /dev/null Silences copious compilation messages. Returns ------- orig_stderr : file descriptor Copy of original stderr file descriptor """ sys.stderr.flush() stderr_fileno = sys.stderr.fileno() orig_stderr = os.dup(stderr_fileno) devnull = os.open(os.devnull, os.O_WRONLY) os.dup2(devnull, stderr_fileno) os.close(devnull) return orig_stderr def _has_fileno(stream): """Returns whether the stream object seems to have a working fileno() Tells whether _redirect_stderr is likely to work. Parameters ---------- stream : IO stream object Returns ------- has_fileno : bool True if stream.fileno() exists and doesn't raise OSError or UnsupportedOperation """ try: stream.fileno() except (AttributeError, OSError, IOError, io.UnsupportedOperation): return False return True def _append_id(file, id, suffix='.csv'): fname = os.path.basename(file) fpath = os.path.dirname(file) fname2 = re.sub(r'\.csv\s*$', '_{}.csv'.format(id), fname) if fname2 == fname: fname2 = '{}_{}.csv'.format(fname, id) return os.path.join(fpath, fname2) def _writable_sample_file(file, warn=True, wfun=None): """Check to see if file is writable, if not use temporary file""" if wfun is None: wfun = lambda x, y: '"{}" is not writable; use "{}" instead'.format(x, y) dir = os.path.dirname(file) dir = os.getcwd() if dir == '' else dir if os.access(dir, os.W_OK): return file else: dir2 = tempfile.mkdtemp() if warn: logger.warning(wfun(dir, dir2)) return os.path.join(dir2, os.path.basename(file)) def is_legal_stan_vname(name): stan_kw1 = ('for', 'in', 'while', 'repeat', 'until', 'if', 'then', 'else', 'true', 'false') stan_kw2 = ('int', 'real', 'vector', 'simplex', 'ordered', 'positive_ordered', 'row_vector', 'matrix', 'corr_matrix', 'cov_matrix', 'lower', 'upper') stan_kw3 = ('model', 'data', 'parameters', 'quantities', 
'transformed', 'generated') cpp_kw = ("alignas", "alignof", "and", "and_eq", "asm", "auto", "bitand", "bitor", "bool", "break", "case", "catch", "char", "char16_t", "char32_t", "class", "compl", "const", "constexpr", "const_cast", "continue", "decltype", "default", "delete", "do", "double", "dynamic_cast", "else", "enum", "explicit", "export", "extern", "false", "float", "for", "friend", "goto", "if", "inline", "int", "long", "mutable", "namespace", "new", "noexcept", "not", "not_eq", "nullptr", "operator", "or", "or_eq", "private", "protected", "public", "register", "reinterpret_cast", "return", "short", "signed", "sizeof", "static", "static_assert", "static_cast", "struct", "switch", "template", "this", "thread_local", "throw", "true", "try", "typedef", "typeid", "typename", "union", "unsigned", "using", "virtual", "void", "volatile", "wchar_t", "while", "xor", "xor_eq") illegal = stan_kw1 + stan_kw2 + stan_kw3 + cpp_kw if re.findall(r'(\.|^[0-9]|__$)', name): return False return not name in illegal def _dict_to_rdump(data): parts = [] for name, value in data.items(): if isinstance(value, (Sequence, Number, np.number, np.ndarray, int, bool, float)) \ and not isinstance(value, string_types): value = np.asarray(value) else: raise ValueError("Variable {} is not a number and cannot be dumped.".format(name)) if value.dtype == np.bool: value = value.astype(int) if value.ndim == 0: s = '{} <- {}\n'.format(name, str(value)) elif value.ndim == 1: s = '{} <-\nc({})\n'.format(name, ', '.join(str(v) for v in value)) elif value.ndim > 1: tmpl = '{} <-\nstructure(c({}), .Dim = c({}))\n' # transpose value as R uses column-major # 'F' = Fortran, column-major s = tmpl.format(name, ', '.join(str(v) for v in value.flatten(order='F')), ', '.join(str(v) for v in value.shape)) parts.append(s) return ''.join(parts) def stan_rdump(data, filename): """ Dump a dictionary with model data into a file using the R dump format that Stan supports. 
Parameters ---------- data : dict filename : str """ for name in data: if not is_legal_stan_vname(name): raise ValueError("Variable name {} is not allowed in Stan".format(name)) with open(filename, 'w') as f: f.write(_dict_to_rdump(data)) def _rdump_value_to_numpy(s): """ Convert a R dump formatted value to Numpy equivalent For example, "c(1, 2)" becomes ``array([1, 2])`` Only supports a few R data structures. Will not work with European decimal format. """ if "structure" in s: vector_str, shape_str = re.findall(r'c\([^\)]+\)', s) shape = [int(d) for d in shape_str[2:-1].split(',')] if '.' in vector_str: arr = np.array([float(v) for v in vector_str[2:-1].split(',')]) else: arr = np.array([int(v) for v in vector_str[2:-1].split(',')]) # 'F' = Fortran, column-major arr = arr.reshape(shape, order='F') elif "c(" in s: if '.' in s: arr = np.array([float(v) for v in s[2:-1].split(',')], order='F') else: arr = np.array([int(v) for v in s[2:-1].split(',')], order='F') else: arr = np.array(float(s) if '.' in s else int(s)) return arr def _remove_empty_pars(pars, pars_oi, dims_oi): """ Remove parameters that are actually empty. For example, the parameter y would be removed with the following model code: transformed data { int n; n <- 0; } parameters { real y[n]; } Parameters ---------- pars: iterable of str pars_oi: list of str dims_oi: list of list of int Returns ------- pars_trimmed: list of str """ pars = list(pars) for par, dim in zip(pars_oi, dims_oi): if par in pars and np.prod(dim) == 0: del pars[pars.index(par)] return pars def read_rdump(filename): """ Read data formatted using the R dump format Parameters ---------- filename: str Returns ------- data : OrderedDict """ contents = open(filename).read().strip() names = [name.strip() for name in re.findall(r'^(\w+) <-', contents, re.MULTILINE)] values = [value.strip() for value in re.split('\w+ +<-', contents) if value] if len(values) != len(names): raise ValueError("Unable to read file. 
Unable to pair variable name with value.") d = OrderedDict() for name, value in zip(names, values): d[name.strip()] = _rdump_value_to_numpy(value.strip()) return d def to_dataframe(fit, pars=None, permuted=False, dtypes=None, inc_warmup=False, diagnostics=True, header=True): """Extract samples as a pandas dataframe for different parameters. Parameters ---------- pars : {str, sequence of str} parameter (or quantile) name(s). permuted : bool If True, returned samples are permuted. If inc_warmup is True, warmup samples have negative order. dtypes : dict datatype of parameter(s). If nothing is passed, float will be used for all parameters. inc_warmup : bool If True, warmup samples are kept; otherwise they are discarded. diagnostics : bool If True, include hmc diagnostics in dataframe. header : bool If True, include header columns. Returns ------- df : pandas dataframe Returned dataframe contains: [header_df]|[draws_df]|[diagnostics_df], where all groups are optional. To exclude draws_df use `pars=[]`. """ try: import pandas as pd except ImportError: raise ImportError("Pandas module not found. 
You can install pandas with: pip install pandas") fit._verify_has_samples() pars_original = pars if pars is None: pars = fit.sim['pars_oi'] elif isinstance(pars, string_types): pars = [pars] if pars: pars = pystan.misc._remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi']) allpars = fit.sim['pars_oi'] + fit.sim['fnames_oi'] _check_pars(allpars, pars) if dtypes is None: dtypes = {} n_kept = [s if inc_warmup else s-w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])] chains = len(fit.sim['samples']) diagnostic_type = {'divergent__':int, 'energy__':float, 'treedepth__':int, 'accept_stat__':float, 'stepsize__':float, 'n_leapfrog__':int} header_dict = OrderedDict() if header: idx = np.concatenate([np.full(n_kept[chain], chain, dtype=int) for chain in range(chains)]) warmup = [np.zeros(n_kept[chain], dtype=np.int64) for chain in range(chains)] if inc_warmup: draw = [] for chain, w in zip(range(chains), fit.sim['warmup2']): warmup[chain][:w] = 1 draw.append(np.arange(n_kept[chain], dtype=np.int64) - w) draw = np.concatenate(draw) else: draw = np.concatenate([np.arange(n_kept[chain], dtype=np.int64) for chain in range(chains)]) warmup = np.concatenate(warmup) header_dict = OrderedDict(zip(['chain', 'draw', 'warmup'], [idx, draw, warmup])) if permuted: if inc_warmup: chain_permutation = [] chain_permutation_order = [] permutation = [] permutation_order = [] for chain, p, w in zip(range(chains), fit.sim['permutation'], fit.sim['warmup2']): chain_permutation.append(list(range(-w, 0)) + p) chain_permutation_order.append(list(range(-w, 0)) + list(np.argsort(p))) permutation.append(sum(n_kept[:chain])+chain_permutation[-1]+w) permutation_order.append(sum(n_kept[:chain])+chain_permutation_order[-1]+w) chain_permutation = np.concatenate(chain_permutation) chain_permutation_order = np.concatenate(chain_permutation_order) permutation = np.concatenate(permutation) permutation_order = np.concatenate(permutation_order) else: chain_permutation = 
np.concatenate(fit.sim['permutation']) chain_permutation_order = np.concatenate([np.argsort(item) for item in fit.sim['permutation']]) permutation = np.concatenate([sum(n_kept[:chain])+p for chain, p in enumerate(fit.sim['permutation'])]) permutation_order = np.argsort(permutation) header_dict["permutation"] = permutation header_dict["chain_permutation"] = chain_permutation header_dict["permutation_order"] = permutation_order header_dict["chain_permutation_order"] = chain_permutation_order if header: header_df = pd.DataFrame.from_dict(header_dict) else: if permuted: header_df = pd.DataFrame.from_dict({"permutation_order" : header_dict["permutation_order"]}) else: header_df = pd.DataFrame() fnames_set = set(fit.sim['fnames_oi']) pars_set = set(pars) if pars_original is None or fnames_set == pars_set: dfs = [pd.DataFrame.from_dict(pyholder.chains).iloc[-n:] for pyholder, n in zip(fit.sim['samples'], n_kept)] df = pd.concat(dfs, axis=0, sort=False, ignore_index=True) if dtypes: if not fnames_set.issuperset(pars_set): par_keys = OrderedDict([(par, []) for par in fit.sim['pars_oi']]) for key in fit.sim['fnames_oi']: par = key.split("[") par = par[0] par_keys[par].append(key) for par, dtype in dtypes.items(): if isinstance(dtype, (float, np.float64)): continue for key in par_keys.get(par, [par]): df.loc[:, key] = df.loc[:, key].astype(dtype) elif pars: par_keys = dict() if not fnames_set.issuperset(pars_set): par_keys = OrderedDict([(par, []) for par in fit.sim['pars_oi']]) for key in fit.sim['fnames_oi']: par = key.split("[") par = par[0] par_keys[par].append(key) columns = [] for par in pars: columns.extend(par_keys.get(par, [par])) columns = list(np.unique(columns)) df = pd.DataFrame(index=np.arange(sum(n_kept)), columns=columns, dtype=float) for key in columns: key_values = [] for chain, (pyholder, n) in enumerate(zip(fit.sim['samples'], n_kept)): key_values.append(pyholder.chains[key][-n:]) df.loc[:, key] = np.concatenate(key_values) for par, dtype in 
dtypes.items(): if isinstance(dtype, (float, np.float64)): continue for key in par_keys.get(par, [par]): df.loc[:, key] = df.loc[:, key].astype(dtype) else: df = pd.DataFrame() if diagnostics: diagnostics_dfs = [] for idx, (pyholder, permutation, n) in enumerate(zip(fit.sim['samples'], fit.sim['permutation'], n_kept), 1): diagnostics_df = pd.DataFrame(pyholder['sampler_params'], index=pyholder['sampler_param_names']).T diagnostics_df = diagnostics_df.iloc[-n:, :] for key, dtype in diagnostic_type.items(): if key in diagnostics_df: diagnostics_df.loc[:, key] = diagnostics_df.loc[:, key].astype(dtype) diagnostics_dfs.append(diagnostics_df) if diagnostics_dfs: diagnostics_df = pd.concat(diagnostics_dfs, axis=0, sort=False, ignore_index=True) else: diagnostics_df = pd.DataFrame() else: diagnostics_df = pd.DataFrame() df = pd.concat((header_df, df, diagnostics_df), axis=1, sort=False) if permuted: df.sort_values(by='permutation_order', inplace=True) if not header: df.drop(columns='permutation_order', inplace=True) return df def get_stepsize(fit): """Parse stepsize from fit object Parameters ---------- fit : StanFit4Model Returns ------- list Returns an empty list if step sizes are not found in ``fit.get_adaptation_info``. """ fit._verify_has_samples() stepsizes = [] for adaptation_info in fit.get_adaptation_info(): for line in adaptation_info.splitlines(): if "Step size" in line: stepsizes.append(float(line.split("=")[1].strip())) break return stepsizes def get_inv_metric(fit, as_dict=False): """Parse inverse metric from the fit object Parameters ---------- fit : StanFit4Model as_dict : bool, optional Returns ------- list or dict Returns an empty list if inverse metric is not found in ``fit.get_adaptation_info()``. If `as_dict` returns a dictionary which can be used with `.sampling` method. 
""" fit._verify_has_samples() inv_metrics = [] if not (("ctrl" in fit.stan_args[0]) and ("sampling" in fit.stan_args[0]["ctrl"])): return inv_metrics metric = [args["ctrl"]["sampling"]["metric"].name for args in fit.stan_args] for adaptation_info, metric_name in zip(fit.get_adaptation_info(), metric): iter_adaptation_info = iter(adaptation_info.splitlines()) inv_metric_list = [] for line in iter_adaptation_info: if any(value in line for value in ["Step size", "Adaptation"]): continue elif "inverse mass matrix" in line: for line in iter_adaptation_info: stripped_set = set(line.replace("# ", "").replace(" ", "").replace(",", "")) if stripped_set.issubset(set(".-1234567890e")): inv_metric = np.array(list(map(float, line.replace("# ", "").strip().split(",")))) if metric_name == "DENSE_E": inv_metric = np.atleast_2d(inv_metric) inv_metric_list.append(inv_metric) else: break inv_metrics.append(np.concatenate(inv_metric_list)) return inv_metrics if not as_dict else dict(enumerate(inv_metrics)) def get_last_position(fit, warmup=False): """Parse last position from fit object Parameters ---------- fit : StanFit4Model warmup : bool If True, returns the last warmup position, when warmup has been done. Otherwise function returns the first sample position. Returns ------- list list contains a dictionary of last draw from each chain. """ fit._verify_has_samples() positions = [] extracted = fit.extract(permuted=False, pars=fit.model_pars, inc_warmup=warmup) draw_location = -1 if warmup: draw_location += max(1, fit.sim["warmup"]) chains = fit.sim["chains"] for i in range(chains): extract_pos = {key : values[draw_location, i] for key, values in extracted.items()} positions.append(extract_pos) return positions def add_libtbb_path(): """Add libtbb to PATH.""" libtbb = os.path.abspath(os.path.join( os.path.dirname(__file__), 'stan', 'lib', 'stan_math', 'lib', 'tbb' )) os.environ['PATH'] = ';'.join( list(OrderedDict.fromkeys([libtbb] + os.getenv('PATH', '').split(';'))) )
58,254
Python
.py
1,283
37.081839
117
0.592985
stan-dev/pystan2
921
191
0
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)