id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
27,800 | cli.py | kovidgoyal_calibre/src/calibre/devices/usbms/cli.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
import shutil
import time
from calibre import fsync
from calibre.devices.errors import PathError
from calibre.utils.filenames import case_preserving_open_file
class File:
def __init__(self, path):
stats = os.stat(path)
self.is_dir = os.path.isdir(path)
self.is_readonly = not os.access(path, os.W_OK)
self.ctime = stats.st_ctime
self.wtime = stats.st_mtime
self.size = stats.st_size
if path.endswith(os.sep):
path = path[:-1]
self.path = path
self.name = os.path.basename(path)
def check_transfer(infile, dest):
infile.seek(0)
dest.seek(0)
return infile.read() == dest.read()
class CLI:
def get_file(self, path, outfile, end_session=True):
path = self.munge_path(path)
with open(path, 'rb') as src:
shutil.copyfileobj(src, outfile)
def put_file(self, infile, path, replace_file=False, end_session=True):
path = self.munge_path(path)
close = False
if not hasattr(infile, 'read'):
infile, close = open(infile, 'rb'), True
infile.seek(0)
if os.path.isdir(path):
path = os.path.join(path, infile.name)
if not replace_file and os.path.exists(path):
raise PathError('File already exists: ' + path)
dest, actual_path = case_preserving_open_file(path)
with dest:
try:
shutil.copyfileobj(infile, dest)
except OSError:
print('WARNING: First attempt to send file to device failed')
time.sleep(0.2)
infile.seek(0)
dest.seek(0)
dest.truncate()
shutil.copyfileobj(infile, dest)
fsync(dest)
# if not check_transfer(infile, dest): raise Exception('Transfer failed')
if close:
infile.close()
return actual_path
def munge_path(self, path):
if path.startswith('/') and not (path.startswith(self._main_prefix) or
(self._card_a_prefix and path.startswith(self._card_a_prefix)) or
(self._card_b_prefix and path.startswith(self._card_b_prefix))):
path = self._main_prefix + path[1:]
elif path.startswith('carda:'):
path = path.replace('carda:', self._card_a_prefix[:-1])
elif path.startswith('cardb:'):
path = path.replace('cardb:', self._card_b_prefix[:-1])
return path
def list(self, path, recurse=False, end_session=True, munge=True):
if munge:
path = self.munge_path(path)
if os.path.isfile(path):
return [(os.path.dirname(path), [File(path)])]
entries = [File(os.path.join(path, f)) for f in os.listdir(path)]
dirs = [(path, entries)]
for _file in entries:
if recurse and _file.is_dir:
dirs[len(dirs):] = self.list(_file.path, recurse=True, munge=False)
return dirs
def mkdir(self, path, end_session=True):
if self.SUPPORTS_SUB_DIRS:
path = self.munge_path(path)
os.mkdir(path)
def rm(self, path, end_session=True):
path = self.munge_path(path)
self.delete_books([path])
def touch(self, path, end_session=True):
path = self.munge_path(path)
if not os.path.exists(path):
open(path, 'wb').close()
if not os.path.isdir(path):
os.utime(path, None)
| 3,609 | Python | .py | 90 | 30.788889 | 85 | 0.592349 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,801 | books.py | kovidgoyal_calibre/src/calibre/devices/usbms/books.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
import re
import sys
import time
from functools import cmp_to_key
from calibre import force_unicode, isbytestring
from calibre.constants import preferred_encoding
from calibre.devices.interface import BookList as _BookList
from calibre.devices.mime import mime_type_ext
from calibre.ebooks.metadata import title_sort
from calibre.ebooks.metadata.book.base import Metadata
from calibre.prints import debug_print
from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
from polyglot.builtins import cmp, iteritems, itervalues, string_or_bytes
def none_cmp(xx, yy):
x = xx[1]
y = yy[1]
if x is None and y is None:
# No sort_key needed here, because defaults are ascii
try:
return cmp(xx[2], yy[2])
except TypeError:
return 0
if x is None:
return 1
if y is None:
return -1
if isinstance(x, string_or_bytes) and isinstance(y, string_or_bytes):
x, y = sort_key(force_unicode(x)), sort_key(force_unicode(y))
try:
c = cmp(x, y)
except TypeError:
c = 0
if c != 0:
return c
# same as above -- no sort_key needed here
try:
return cmp(xx[2], yy[2])
except TypeError:
return 0
class Book(Metadata):
def __init__(self, prefix, lpath, size=None, other=None):
from calibre.ebooks.metadata.meta import path_to_ext
Metadata.__init__(self, '')
self._new_book = False
self.device_collections = []
self.path = os.path.join(prefix, lpath)
if os.sep == '\\':
self.path = self.path.replace('/', '\\')
self.lpath = lpath.replace('\\', '/')
else:
self.lpath = lpath
self.mime = mime_type_ext(path_to_ext(lpath))
self.size = size # will be set later if None
try:
self.datetime = time.gmtime(os.path.getctime(self.path))
except:
self.datetime = time.gmtime()
if other:
self.smart_update(other)
def __eq__(self, other):
# use lpath because the prefix can change, changing path
return self.lpath == getattr(other, 'lpath', None)
@property
def db_id(self):
'''The database id in the application database that this file corresponds to'''
match = re.search(r'_(\d+)$', self.lpath.rpartition('.')[0])
if match:
return int(match.group(1))
return None
@property
def title_sorter(self):
'''String to sort the title. If absent, title is returned'''
return title_sort(self.title)
@property
def thumbnail(self):
return None
class BookList(_BookList):
def __init__(self, oncard, prefix, settings):
_BookList.__init__(self, oncard, prefix, settings)
self._bookmap = {}
def supports_collections(self):
return False
def add_book(self, book, replace_metadata):
return self.add_book_extended(book, replace_metadata, check_for_duplicates=True)
def add_book_extended(self, book, replace_metadata, check_for_duplicates):
'''
Add the book to the booklist, if needed. Return None if the book is
already there and not updated, otherwise return the book.
'''
try:
b = self.index(book) if check_for_duplicates else None
except (ValueError, IndexError):
b = None
if b is None:
self.append(book)
return book
if replace_metadata:
self[b].smart_update(book, replace_metadata=True)
return self[b]
return None
def remove_book(self, book):
self.remove(book)
def get_collections(self):
return {}
class CollectionsBookList(BookList):
def supports_collections(self):
return True
def in_category_sort_rules(self, attr):
sorts = tweaks['sony_collection_sorting_rules']
for attrs,sortattr in sorts:
if attr in attrs or '*' in attrs:
return sortattr
return None
def compute_category_name(self, field_key, field_value, field_meta):
from calibre.utils.formatter import EvalFormatter
renames = tweaks['sony_collection_renaming_rules']
field_name = renames.get(field_key, None)
if field_name is None:
if field_meta['is_custom']:
field_name = field_meta['name']
else:
field_name = ''
cat_name = EvalFormatter().safe_format(
fmt=tweaks['sony_collection_name_template'],
kwargs={'category':field_name, 'value':field_value},
error_value='GET_CATEGORY', book=None)
return cat_name.strip()
def get_collections(self, collection_attributes):
from calibre.utils.config import device_prefs
debug_print('Starting get_collections:', device_prefs['manage_device_metadata'])
debug_print('Renaming rules:', tweaks['sony_collection_renaming_rules'])
debug_print('Formatting template:', tweaks['sony_collection_name_template'])
debug_print('Sorting rules:', tweaks['sony_collection_sorting_rules'])
# Complexity: we can use renaming rules only when using automatic
# management. Otherwise we don't always have the metadata to make the
# right decisions
use_renaming_rules = device_prefs['manage_device_metadata'] == 'on_connect'
collections = {}
# get the special collection names
all_by_author = ''
all_by_title = ''
ca = []
all_by_something = []
for c in collection_attributes:
if c.startswith('aba:') and c[4:].strip():
all_by_author = c[4:].strip()
elif c.startswith('abt:') and c[4:].strip():
all_by_title = c[4:].strip()
elif c.startswith('abs:') and c[4:].strip():
name = c[4:].strip()
sby = self.in_category_sort_rules(name)
if sby is None:
sby = name
if name and sby:
all_by_something.append((name, sby))
else:
ca.append(c.lower())
collection_attributes = ca
for book in self:
tsval = book.get('_pb_title_sort',
book.get('title_sort', book.get('title', 'zzzz')))
asval = book.get('_pb_author_sort', book.get('author_sort', ''))
# Make sure we can identify this book via the lpath
lpath = getattr(book, 'lpath', None)
if lpath is None:
continue
# Decide how we will build the collections. The default: leave the
# book in all existing collections. Do not add any new ones.
attrs = ['device_collections']
if getattr(book, '_new_book', False):
if device_prefs['manage_device_metadata'] == 'manual':
# Ensure that the book is in all the book's existing
# collections plus all metadata collections
attrs += collection_attributes
else:
# For new books, both 'on_send' and 'on_connect' do the same
# thing. The book's existing collections are ignored. Put
# the book in collections defined by its metadata.
attrs = collection_attributes
elif device_prefs['manage_device_metadata'] == 'on_connect':
# For existing books, modify the collections only if the user
# specified 'on_connect'
attrs = collection_attributes
for attr in attrs:
attr = attr.strip()
# If attr is device_collections, then we cannot use
# format_field, because we don't know the fields where the
# values came from.
if attr == 'device_collections':
doing_dc = True
val = book.device_collections # is a list
else:
doing_dc = False
ign, val, orig_val, fm = book.format_field_extended(attr)
if not val:
continue
if isbytestring(val):
val = val.decode(preferred_encoding, 'replace')
if isinstance(val, (list, tuple)):
val = list(val)
elif fm['datatype'] == 'series':
val = [orig_val]
elif fm['datatype'] == 'text' and fm['is_multiple']:
val = orig_val
elif fm['datatype'] == 'composite' and fm['is_multiple']:
val = [v.strip() for v in
val.split(fm['is_multiple']['ui_to_list'])]
else:
val = [val]
sort_attr = self.in_category_sort_rules(attr)
for category in val:
is_series = False
if doing_dc:
# Attempt to determine if this value is a series by
# comparing it to the series name.
if category == book.series:
is_series = True
elif fm['is_custom']: # is a custom field
if fm['datatype'] == 'text' and len(category) > 1 and \
category[0] == '[' and category[-1] == ']':
continue
if fm['datatype'] == 'series':
is_series = True
else: # is a standard field
if attr == 'tags' and len(category) > 1 and \
category[0] == '[' and category[-1] == ']':
continue
if attr == 'series' or \
('series' in collection_attributes and
book.get('series', None) == category):
is_series = True
if use_renaming_rules:
cat_name = self.compute_category_name(attr, category, fm)
else:
cat_name = category
if cat_name not in collections:
collections[cat_name] = {}
if use_renaming_rules and sort_attr:
sort_val = book.get(sort_attr, None)
collections[cat_name][lpath] = (book, sort_val, tsval)
elif is_series:
if doing_dc:
collections[cat_name][lpath] = \
(book, book.get('series_index', sys.maxsize), tsval)
else:
collections[cat_name][lpath] = \
(book, book.get(attr+'_index', sys.maxsize), tsval)
else:
if lpath not in collections[cat_name]:
collections[cat_name][lpath] = (book, tsval, tsval)
# All books by author
if all_by_author:
if all_by_author not in collections:
collections[all_by_author] = {}
collections[all_by_author][lpath] = (book, asval, tsval)
# All books by title
if all_by_title:
if all_by_title not in collections:
collections[all_by_title] = {}
collections[all_by_title][lpath] = (book, tsval, asval)
for (n, sb) in all_by_something:
if n not in collections:
collections[n] = {}
collections[n][lpath] = (book, book.get(sb, ''), tsval)
# Sort collections
result = {}
for category, lpaths in iteritems(collections):
books = sorted(itervalues(lpaths), key=cmp_to_key(none_cmp))
result[category] = [x[0] for x in books]
return result
def rebuild_collections(self, booklist, oncard):
'''
For each book in the booklist for the card oncard, remove it from all
its current collections, then add it to the collections specified in
device_collections.
oncard is None for the main memory, carda for card A, cardb for card B,
etc.
booklist is the object created by the :method:`books` call above.
'''
pass
| 12,760 | Python | .py | 285 | 31.470175 | 88 | 0.540534 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,802 | driver.py | kovidgoyal_calibre/src/calibre/devices/usbms/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Generic USB Mass storage device driver. This is not a complete stand alone
driver. It is intended to be subclassed with the relevant parts implemented
for a particular device.
'''
import json
import os
import shutil
from itertools import cycle
from calibre import fsync, isbytestring, prints
from calibre.constants import filesystem_encoding, ismacos, numeric_version
from calibre.devices.usbms.books import Book, BookList
from calibre.devices.usbms.cli import CLI
from calibre.devices.usbms.device import Device
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from calibre.prints import debug_print
from polyglot.builtins import itervalues, string_or_bytes
def safe_walk(top, topdown=True, onerror=None, followlinks=False, maxdepth=128):
' A replacement for os.walk that does not die when it encounters undecodeable filenames in a linux filesystem'
if maxdepth < 0:
return
islink, join, isdir = os.path.islink, os.path.join, os.path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
names = os.listdir(top)
except OSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isinstance(name, bytes):
try:
name = name.decode(filesystem_encoding)
except UnicodeDecodeError:
debug_print('Skipping undecodeable file: %r' % name)
continue
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
yield from safe_walk(new_path, topdown, onerror, followlinks, maxdepth-1)
if not topdown:
yield top, dirs, nondirs
# CLI must come before Device as it implements the CLI functions that
# are inherited from the device interface in Device.
class USBMS(CLI, Device):
'''
The base class for all USBMS devices. Implements the logic for
sending/getting/updating metadata/caching metadata/etc.
'''
description = _('Communicate with an e-book reader.')
author = 'John Schember'
supported_platforms = ['windows', 'osx', 'linux']
# Store type instances of BookList and Book. We must do this because
# a) we need to override these classes in some device drivers, and
# b) the classmethods seem only to see real attributes declared in the
# class, not attributes stored in the class
booklist_class = BookList
book_class = Book
FORMATS = []
CAN_SET_METADATA = []
METADATA_CACHE = 'metadata.calibre'
DRIVEINFO = 'driveinfo.calibre'
SCAN_FROM_ROOT = False
def _update_driveinfo_record(self, dinfo, prefix, location_code, name=None):
import uuid
from calibre.utils.date import isoformat, now
if not isinstance(dinfo, dict):
dinfo = {}
if dinfo.get('device_store_uuid', None) is None:
dinfo['device_store_uuid'] = str(uuid.uuid4())
if dinfo.get('device_name', None) is None:
dinfo['device_name'] = self.get_gui_name()
if name is not None:
dinfo['device_name'] = name
dinfo['location_code'] = location_code
dinfo['last_library_uuid'] = getattr(self, 'current_library_uuid', None)
dinfo['calibre_version'] = '.'.join([str(i) for i in numeric_version])
dinfo['date_last_connected'] = isoformat(now())
dinfo['prefix'] = prefix.replace('\\', '/')
return dinfo
def _update_driveinfo_file(self, prefix, location_code, name=None):
from calibre.utils.config import from_json, to_json
if os.path.exists(os.path.join(prefix, self.DRIVEINFO)):
with open(os.path.join(prefix, self.DRIVEINFO), 'rb') as f:
try:
driveinfo = json.loads(f.read(), object_hook=from_json)
except:
driveinfo = None
driveinfo = self._update_driveinfo_record(driveinfo, prefix,
location_code, name)
data = json.dumps(driveinfo, default=to_json)
if not isinstance(data, bytes):
data = data.encode('utf-8')
with open(os.path.join(prefix, self.DRIVEINFO), 'wb') as f:
f.write(data)
fsync(f)
else:
driveinfo = self._update_driveinfo_record({}, prefix, location_code, name)
data = json.dumps(driveinfo, default=to_json)
if not isinstance(data, bytes):
data = data.encode('utf-8')
with open(os.path.join(prefix, self.DRIVEINFO), 'wb') as f:
f.write(data)
fsync(f)
return driveinfo
def get_device_information(self, end_session=True):
self.report_progress(1.0, _('Get device information...'))
self.driveinfo = {}
def raise_os_error(e):
raise OSError(_('Failed to access files in the main memory of'
' your device. You should contact the device'
' manufacturer for support. Common fixes are:'
' try a different USB cable/USB port on your computer.'
' If you device has a "Reset to factory defaults" type'
' of setting somewhere, use it. Underlying error: %s')
% e) from e
if self._main_prefix is not None:
try:
self.driveinfo['main'] = self._update_driveinfo_file(self._main_prefix, 'main')
except PermissionError as e:
if ismacos:
raise PermissionError(_(
'Permission was denied by macOS trying to access files in the main memory of'
' your device. You will need to grant permission explicitly by looking under'
' System Preferences > Security and Privacy > Privacy > Files and Folders.'
' Underlying error: %s'
) % e) from e
raise_os_error(e)
except OSError as e:
raise_os_error(e)
try:
if self._card_a_prefix is not None:
self.driveinfo['A'] = self._update_driveinfo_file(self._card_a_prefix, 'A')
if self._card_b_prefix is not None:
self.driveinfo['B'] = self._update_driveinfo_file(self._card_b_prefix, 'B')
except OSError as e:
raise OSError(_('Failed to access files on the SD card in your'
' device. This can happen for many reasons. The SD card may be'
' corrupted, it may be too large for your device, it may be'
' write-protected, etc. Try a different SD card, or reformat'
' your SD card using the FAT32 filesystem. Also make sure'
' there are not too many files in the root of your SD card.'
' Underlying error: %s') % e)
return (self.get_gui_name(), '', '', '', self.driveinfo)
def set_driveinfo_name(self, location_code, name):
if location_code == 'main':
self._update_driveinfo_file(self._main_prefix, location_code, name)
elif location_code == 'A':
self._update_driveinfo_file(self._card_a_prefix, location_code, name)
elif location_code == 'B':
self._update_driveinfo_file(self._card_b_prefix, location_code, name)
def formats_to_scan_for(self):
return set(self.settings().format_map) | set(self.FORMATS)
def is_allowed_book_file(self, filename, path, prefix):
return True
def books(self, oncard=None, end_session=True):
from calibre.ebooks.metadata.meta import path_to_ext
debug_print('USBMS: Fetching list of books from device. Device=',
self.__class__.__name__,
'oncard=', oncard)
dummy_bl = self.booklist_class(None, None, None)
if oncard == 'carda' and not self._card_a_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
elif oncard == 'cardb' and not self._card_b_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
elif oncard and oncard != 'carda' and oncard != 'cardb':
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
prefix = self._card_a_prefix if oncard == 'carda' else \
self._card_b_prefix if oncard == 'cardb' \
else self._main_prefix
ebook_dirs = self.get_carda_ebook_dir() if oncard == 'carda' else \
self.EBOOK_DIR_CARD_B if oncard == 'cardb' else \
self.get_main_ebook_dir()
debug_print('USBMS: dirs are:', prefix, ebook_dirs)
# get the metadata cache
bl = self.booklist_class(oncard, prefix, self.settings)
need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
# make a dict cache of paths so the lookup in the loop below is faster.
bl_cache = {}
for idx, b in enumerate(bl):
bl_cache[b.lpath] = idx
all_formats = self.formats_to_scan_for()
def update_booklist(filename, path, prefix):
changed = False
# Ignore AppleDouble files
if filename.startswith("._"):
return False
if path_to_ext(filename) in all_formats and self.is_allowed_book_file(filename, path, prefix):
try:
lpath = os.path.join(path, filename).partition(self.normalize_path(prefix))[2]
if lpath.startswith(os.sep):
lpath = lpath[len(os.sep):]
lpath = lpath.replace('\\', '/')
idx = bl_cache.get(lpath, None)
if idx is not None:
bl_cache[lpath] = None
if self.update_metadata_item(bl[idx]):
# print 'update_metadata_item returned true'
changed = True
else:
if bl.add_book(self.book_from_path(prefix, lpath),
replace_metadata=False):
changed = True
except: # Probably a filename encoding error
import traceback
traceback.print_exc()
return changed
if isinstance(ebook_dirs, string_or_bytes):
ebook_dirs = [ebook_dirs]
for ebook_dir in ebook_dirs:
ebook_dir = self.path_to_unicode(ebook_dir)
if self.SCAN_FROM_ROOT:
ebook_dir = self.normalize_path(prefix)
else:
ebook_dir = self.normalize_path(
os.path.join(prefix, *(ebook_dir.split('/')))
if ebook_dir else prefix)
debug_print('USBMS: scan from root', self.SCAN_FROM_ROOT, ebook_dir)
if not os.path.exists(ebook_dir):
continue
# Get all books in the ebook_dir directory
if self.SUPPORTS_SUB_DIRS or self.SUPPORTS_SUB_DIRS_FOR_SCAN:
# build a list of files to check, so we can accurately report progress
flist = []
for path, dirs, files in safe_walk(ebook_dir):
for filename in files:
if filename != self.METADATA_CACHE:
flist.append({'filename': self.path_to_unicode(filename),
'path':self.path_to_unicode(path)})
for i, f in enumerate(flist):
self.report_progress(i/float(len(flist)), _('Getting list of books on device...'))
changed = update_booklist(f['filename'], f['path'], prefix)
if changed:
need_sync = True
else:
paths = os.listdir(ebook_dir)
for i, filename in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Getting list of books on device...'))
changed = update_booklist(self.path_to_unicode(filename), ebook_dir, prefix)
if changed:
need_sync = True
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(itervalues(bl_cache), reverse=True, key=lambda x: -1 if x is None else x):
if idx is not None:
need_sync = True
del bl[idx]
debug_print('USBMS: count found in cache: %d, count of files in metadata: %d, need_sync: %s' %
(len(bl_cache), len(bl), need_sync))
if need_sync: # self.count_found_in_bl != len(bl) or need_sync:
if oncard == 'cardb':
self.sync_booklists((None, None, bl))
elif oncard == 'carda':
self.sync_booklists((None, bl, None))
else:
self.sync_booklists((bl, None, None))
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print('USBMS: Finished fetching list of books from device. oncard=', oncard)
return bl
def upload_books(self, files, names, on_card=None, end_session=True,
metadata=None):
debug_print('USBMS: uploading %d books'%(len(files)))
path = self._sanity_check(on_card, files)
paths = []
names = iter(names)
metadata = iter(metadata)
for i, infile in enumerate(files):
mdata, fname = next(metadata), next(names)
filepath = self.normalize_path(self.create_upload_path(path, mdata, fname))
if not hasattr(infile, 'read'):
infile = self.normalize_path(infile)
filepath = self.put_file(infile, filepath, replace_file=True)
paths.append(filepath)
try:
self.upload_cover(os.path.dirname(filepath),
os.path.splitext(os.path.basename(filepath))[0],
mdata, filepath)
except: # Failure to upload cover is not catastrophic
import traceback
traceback.print_exc()
self.report_progress((i+1) / float(len(files)), _('Transferring books to device...'))
self.report_progress(1.0, _('Transferring books to device...'))
debug_print('USBMS: finished uploading %d books'%(len(files)))
return list(zip(paths, cycle([on_card])))
def upload_cover(self, path, filename, metadata, filepath):
'''
Upload book cover to the device. Default implementation does nothing.
:param path: The full path to the folder where the associated book is located.
:param filename: The name of the book file without the extension.
:param metadata: metadata belonging to the book. Use metadata.thumbnail
for cover
:param filepath: The full path to the e-book file
'''
pass
def add_books_to_metadata(self, locations, metadata, booklists):
debug_print('USBMS: adding metadata for %d books'%(len(metadata)))
metadata = iter(metadata)
locations = tuple(locations)
for i, location in enumerate(locations):
self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...'))
info = next(metadata)
blist = 2 if location[1] == 'cardb' else 1 if location[1] == 'carda' else 0
# Extract the correct prefix from the pathname. To do this correctly,
# we must ensure that both the prefix and the path are normalized
# so that the comparison will work. Book's __init__ will fix up
# lpath, so we don't need to worry about that here.
path = self.normalize_path(location[0])
if self._main_prefix:
prefix = self._main_prefix if \
path.startswith(self.normalize_path(self._main_prefix)) else None
if not prefix and self._card_a_prefix:
prefix = self._card_a_prefix if \
path.startswith(self.normalize_path(self._card_a_prefix)) else None
if not prefix and self._card_b_prefix:
prefix = self._card_b_prefix if \
path.startswith(self.normalize_path(self._card_b_prefix)) else None
if prefix is None:
prints('in add_books_to_metadata. Prefix is None!', path,
self._main_prefix)
continue
lpath = path.partition(prefix)[2]
if lpath.startswith('/') or lpath.startswith('\\'):
lpath = lpath[1:]
book = self.book_class(prefix, lpath, other=info)
if book.size is None:
book.size = os.stat(self.normalize_path(path)).st_size
b = booklists[blist].add_book(book, replace_metadata=True)
if b:
b._new_book = True
self.report_progress(1.0, _('Adding books to device metadata listing...'))
debug_print('USBMS: finished adding metadata')
def delete_single_book(self, path):
os.unlink(path)
def delete_extra_book_files(self, path):
filepath = os.path.splitext(path)[0]
for ext in self.DELETE_EXTS:
for x in (filepath, path):
x += ext
if os.path.exists(x):
if os.path.isdir(x):
shutil.rmtree(x, ignore_errors=True)
else:
os.unlink(x)
if self.SUPPORTS_SUB_DIRS:
try:
os.removedirs(os.path.dirname(path))
except:
pass
def delete_books(self, paths, end_session=True):
debug_print('USBMS: deleting %d books'%(len(paths)))
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
path = self.normalize_path(path)
if os.path.exists(path):
# Delete the ebook
self.delete_single_book(path)
self.delete_extra_book_files(path)
self.report_progress(1.0, _('Removing books from device...'))
debug_print('USBMS: finished deleting %d books'%(len(paths)))
def remove_books_from_metadata(self, paths, booklists):
debug_print('USBMS: removing metadata for %d books'%(len(paths)))
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
for bl in booklists:
for book in bl:
if path.endswith(book.path):
bl.remove_book(book)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
debug_print('USBMS: finished removing metadata for %d books'%(len(paths)))
# If you override this method and you use book._new_book, then you must
# complete the processing before you call this method. The flag is cleared
# at the end just before the return
def sync_booklists(self, booklists, end_session=True):
debug_print('USBMS: starting sync_booklists')
json_codec = JsonCodec()
if not os.path.exists(self.normalize_path(self._main_prefix)):
os.makedirs(self.normalize_path(self._main_prefix))
def write_prefix(prefix, listid):
if (prefix is not None and len(booklists) > listid and
isinstance(booklists[listid], self.booklist_class)):
if not os.path.exists(prefix):
os.makedirs(self.normalize_path(prefix))
with open(self.normalize_path(os.path.join(prefix, self.METADATA_CACHE)), 'wb') as f:
json_codec.encode_to_file(f, booklists[listid])
fsync(f)
write_prefix(self._main_prefix, 0)
write_prefix(self._card_a_prefix, 1)
write_prefix(self._card_b_prefix, 2)
# Clear the _new_book indication, as we are supposed to be done with
# adding books at this point
for blist in booklists:
if blist is not None:
for book in blist:
book._new_book = False
self.report_progress(1.0, _('Sending metadata to device...'))
debug_print('USBMS: finished sync_booklists')
@classmethod
def build_template_regexp(cls):
from calibre.devices.utils import build_template_regexp
return build_template_regexp(cls.save_template())
@classmethod
def path_to_unicode(cls, path):
if isbytestring(path):
path = path.decode(filesystem_encoding)
return path
@classmethod
def normalize_path(cls, path):
'Return path with platform native path separators'
if path is None:
return None
if os.sep == '\\':
path = path.replace('/', '\\')
else:
path = path.replace('\\', '/')
return cls.path_to_unicode(path)
@classmethod
def parse_metadata_cache(cls, bl, prefix, name):
json_codec = JsonCodec()
need_sync = False
cache_file = cls.normalize_path(os.path.join(prefix, name))
if os.access(cache_file, os.R_OK):
try:
with open(cache_file, 'rb') as f:
json_codec.decode_from_file(f, bl, cls.book_class, prefix)
except:
import traceback
traceback.print_exc()
bl = []
need_sync = True
else:
need_sync = True
return need_sync
@classmethod
def update_metadata_item(cls, book):
changed = False
size = os.stat(cls.normalize_path(book.path)).st_size
if size != book.size:
changed = True
mi = cls.metadata_from_path(book.path)
book.smart_update(mi)
book.size = size
return changed
@classmethod
def metadata_from_path(cls, path):
return cls.metadata_from_formats([path])
@classmethod
def metadata_from_formats(cls, fmts):
from calibre.customize.ui import quick_metadata
from calibre.ebooks.metadata.meta import metadata_from_formats
with quick_metadata:
return metadata_from_formats(fmts, force_read_metadata=True,
pattern=cls.build_template_regexp())
@classmethod
def book_from_path(cls, prefix, lpath):
from calibre.ebooks.metadata.book.base import Metadata
if cls.settings().read_metadata or cls.MUST_READ_METADATA:
mi = cls.metadata_from_path(cls.normalize_path(os.path.join(prefix, lpath)))
else:
from calibre.ebooks.metadata.meta import metadata_from_filename
mi = metadata_from_filename(cls.normalize_path(os.path.basename(lpath)),
cls.build_template_regexp())
if mi is None:
mi = Metadata(os.path.splitext(os.path.basename(lpath))[0],
[_('Unknown')])
size = os.stat(cls.normalize_path(os.path.join(prefix, lpath))).st_size
book = cls.book_class(prefix, lpath, other=mi, size=size)
return book
| 24,387 | Python | .py | 485 | 37.552577 | 114 | 0.581847 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,803 | deviceconfig.py | kovidgoyal_calibre/src/calibre/devices/usbms/deviceconfig.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
from calibre.utils.config_base import Config, ConfigProxy
class DeviceConfig:
    '''
    Mixin providing persisted per-driver configuration: accepted format map,
    sub-folder usage, metadata reading, save template and free-form "extra
    customization" values, plus the Qt config widget glue.

    NOTE(review): ``_()`` (translation), ``Config`` and ``ConfigProxy`` come
    from the calibre runtime imported at the top of this module.
    '''
    HELP_MESSAGE = _('Configure device')
    #: Can be None, a string or a list of strings. When it is a string
    #: that string is used for the help text and the actual customization value
    #: can be read from ``dev.settings().extra_customization``.
    #: If it a list of strings, then dev.settings().extra_customization will
    #: also be a list. In this case, you *must* ensure that
    #: EXTRA_CUSTOMIZATION_DEFAULT is also a list. The list can contain either
    #: boolean values or strings, in which case a checkbox or line edit will be
    #: used for them in the config widget, automatically.
    #: If a string contains ::: then the text after it is interpreted as the
    #: tooltip
    EXTRA_CUSTOMIZATION_MESSAGE = None
    #: The default value for extra customization. If you set
    #: EXTRA_CUSTOMIZATION_MESSAGE you *must* set this as well.
    EXTRA_CUSTOMIZATION_DEFAULT = None
    #: A dictionary providing choices for options that should be displayed as a
    #: combo box to the user. The dictionary maps extra customization indexes
    #: to a set of choices.
    EXTRA_CUSTOMIZATION_CHOICES = None
    SUPPORTS_SUB_DIRS = False
    SUPPORTS_SUB_DIRS_FOR_SCAN = False  # This setting is used when scanning for books when SUPPORTS_SUB_DIRS is False
    SUPPORTS_SUB_DIRS_DEFAULT = True
    MUST_READ_METADATA = False
    SUPPORTS_USE_AUTHOR_SORT = False
    #: If None the default is used
    SAVE_TEMPLATE = None
    #: If True the user can add new formats to the driver
    USER_CAN_ADD_NEW_FORMATS = True
    @classmethod
    def _default_save_template(cls):
        # Driver-specific template wins; otherwise fall back to the global
        # send-to-device template from the save-to-disk configuration.
        from calibre.library.save_to_disk import config
        return cls.SAVE_TEMPLATE if cls.SAVE_TEMPLATE else \
                config().parse().send_template
    @classmethod
    def _config_base_name(cls):
        # Works whether invoked on the class itself or on an instance.
        klass = cls if isinstance(cls, type) else cls.__class__
        return klass.__name__
    @classmethod
    def _config(cls):
        '''Build the Config object declaring this driver's persisted options.'''
        name = cls._config_base_name()
        c = Config('device_drivers_%s' % name, _('settings for device drivers'))
        c.add_opt('format_map', default=cls.FORMATS,
            help=_('Ordered list of formats the device will accept'))
        c.add_opt('use_subdirs', default=cls.SUPPORTS_SUB_DIRS_DEFAULT,
            help=_('Place files in sub-folders if the device supports them'))
        c.add_opt('read_metadata', default=True,
            help=_('Read metadata from files on device'))
        c.add_opt('use_author_sort', default=False,
            help=_('Use author sort instead of author'))
        c.add_opt('save_template', default=cls._default_save_template(),
            help=_('Template to control how books are saved'))
        c.add_opt('extra_customization',
            default=cls.EXTRA_CUSTOMIZATION_DEFAULT,
            help=_('Extra customization'))
        return c
    @classmethod
    def _configProxy(cls):
        # Dict-like live view over the persisted config.
        return ConfigProxy(cls._config())
    @classmethod
    def config_widget(cls):
        '''Create the Qt configuration widget for this driver.'''
        from calibre.gui2.device_drivers.configwidget import ConfigWidget
        cw = ConfigWidget(cls.settings(), cls.FORMATS, cls.SUPPORTS_SUB_DIRS,
            cls.MUST_READ_METADATA, cls.SUPPORTS_USE_AUTHOR_SORT,
            cls.EXTRA_CUSTOMIZATION_MESSAGE, cls, extra_customization_choices=cls.EXTRA_CUSTOMIZATION_CHOICES)
        return cw
    @classmethod
    def save_settings(cls, config_widget):
        '''Persist the values the user entered in *config_widget*.'''
        proxy = cls._configProxy()
        proxy['format_map'] = config_widget.format_map()
        if cls.SUPPORTS_SUB_DIRS:
            proxy['use_subdirs'] = config_widget.use_subdirs()
        if not cls.MUST_READ_METADATA:
            proxy['read_metadata'] = config_widget.read_metadata()
        if cls.SUPPORTS_USE_AUTHOR_SORT:
            proxy['use_author_sort'] = config_widget.use_author_sort()
        if cls.EXTRA_CUSTOMIZATION_MESSAGE:
            if isinstance(cls.EXTRA_CUSTOMIZATION_MESSAGE, list):
                # One widget per message entry; the widget kind (checkbox,
                # combo box, line edit) determines how the value is read.
                ec = []
                for i in range(0, len(cls.EXTRA_CUSTOMIZATION_MESSAGE)):
                    if config_widget.opt_extra_customization[i] is None:
                        ec.append(None)
                        continue
                    if hasattr(config_widget.opt_extra_customization[i], 'isChecked'):
                        ec.append(config_widget.opt_extra_customization[i].isChecked())
                    elif hasattr(config_widget.opt_extra_customization[i], 'currentText'):
                        ec.append(str(config_widget.opt_extra_customization[i].currentText()).strip())
                    else:
                        ec.append(str(config_widget.opt_extra_customization[i].text()).strip())
            else:
                # Single string customization; empty string stored as None.
                ec = str(config_widget.opt_extra_customization.text()).strip()
                if not ec:
                    ec = None
            proxy['extra_customization'] = ec
        st = str(config_widget.opt_save_template.text())
        proxy['save_template'] = st
    @classmethod
    def migrate_extra_customization(cls, vals):
        # Hook for subclasses to upgrade stored customization values; the
        # base implementation is the identity.
        return vals
    @classmethod
    def settings(cls):
        '''Parse and return this driver's persisted settings object.'''
        opts = cls._config().parse()
        if isinstance(cls.EXTRA_CUSTOMIZATION_DEFAULT, list):
            # Normalize stored extra_customization to a list and pad it with
            # defaults so its length matches EXTRA_CUSTOMIZATION_DEFAULT.
            if opts.extra_customization is None:
                opts.extra_customization = []
            if not isinstance(opts.extra_customization, list):
                opts.extra_customization = [opts.extra_customization]
            for i,d in enumerate(cls.EXTRA_CUSTOMIZATION_DEFAULT):
                if i >= len(opts.extra_customization):
                    opts.extra_customization.append(d)
            opts.extra_customization = cls.migrate_extra_customization(opts.extra_customization)
        return opts
    @classmethod
    def save_template(cls):
        # Stored template if the user set one, else the computed default.
        st = cls.settings().save_template
        if st:
            return st
        else:
            return cls._default_save_template()
    @classmethod
    def customization_help(cls, gui=False):
        return cls.HELP_MESSAGE
| 6,191 | Python | .py | 126 | 39.238095 | 118 | 0.639312 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,804 | __init__.py | kovidgoyal_calibre/src/calibre/web/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
class Recipe:
    # Empty placeholder with no behavior of its own; presumably serves as a
    # base/marker class for news download recipes -- confirm against callers.
    pass
def get_download_filename_from_response(response):
    '''
    Derive a file name from an HTTP response.

    Prefers the Content-Disposition header (handling both the plain
    ``filename=`` form and the RFC 5987 ``filename*=`` extended form);
    falls back to the last path component of the response URL. Any parsing
    failure is logged and the fallback is returned.
    '''
    from polyglot.urllib import unquote, urlparse
    filename = fallback = ''
    try:
        parsed = urlparse(response.geturl())
        fallback = unquote(parsed.path.split('/')[-1])
        disposition = response.info().get('Content-disposition', '')
        if isinstance(disposition, bytes):
            disposition = disposition.decode('utf-8', 'replace')
        for part in disposition.split(';'):
            if 'filename' not in part:
                continue
            if '*=' in disposition:
                # Extended form: filename*=charset'lang'value -- keep the value.
                tail = disposition.split('*=')[-1]
                filename = tail.split('\'')[-1]
            else:
                filename = disposition.split('=')[-1]
                # Strip one pair of surrounding quotes, if present.
                if filename[0] in ('\'', '"'):
                    filename = filename[1:]
                if filename[-1] in ('\'', '"'):
                    filename = filename[:-1]
                filename = unquote(filename)
            break
    except Exception:
        import traceback
        traceback.print_exc()
    return filename or fallback
def get_download_filename(url, cookie_file=None):
    '''
    Get a local filename for a URL using the content disposition header
    Returns empty string if an error occurs.

    :param url: the URL to probe
    :param cookie_file: optional path to a Mozilla-format cookie jar to send
                        with the request
    '''
    from contextlib import closing
    from calibre import browser
    filename = ''
    br = browser()
    if cookie_file:
        from mechanize import MozillaCookieJar
        cj = MozillaCookieJar()
        cj.load(cookie_file)
        br.set_cookiejar(cj)
    try:
        with closing(br.open(url)) as r:
            filename = get_download_filename_from_response(r)
    except Exception:
        # Best effort: log and return ''. Was a bare ``except:`` which also
        # swallowed SystemExit/KeyboardInterrupt; narrowed to Exception.
        import traceback
        traceback.print_exc()
    return filename
| 1,856 | Python | .py | 51 | 27.019608 | 71 | 0.578595 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,805 | nytimes.py | kovidgoyal_calibre/src/calibre/web/site_parsers/nytimes.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2022, Kovid Goyal <kovid at kovidgoyal.net>
import json
import re
import sys
from pprint import pprint
from xml.sax.saxutils import escape, quoteattr
from calibre.utils.iso8601 import parse_iso8601
module_version = 11 # needed for live updates
pprint
def parse_image(i):
    '''
    Yield HTML fragments for an NYT image block *i*.

    Emits nothing when the block carries no crop/rendition data. A caption
    (with optional credit) is preferred over the legacy HTML caption.
    '''
    crops = i.get('crops') or i.get('spanImageCrops')
    if not crops:
        return
    src = crops[0]['renditions'][0]['url']
    alt = i.get('altText', '')
    yield f'<div><img src="{src}" title="{alt}">'
    caption = i.get('caption')
    if caption:
        yield f'<div class="cap">{"".join(parse_types(caption))}'
        credit = i.get('credit')
        if credit:
            yield f'<span class="cred"> {credit}</span>'
        yield '</div>'
    else:
        legacy = i.get('legacyHtmlCaption')
        if legacy and legacy.strip():
            yield f'<div class="cap">{legacy}</div>'
    yield '</div>'
def parse_img_grid(g):
    '''
    Yield HTML for an NYT image-grid block: each grid image followed by the
    grid-level caption/credit, when present.
    '''
    for entry in g.get('gridMedia', {}):
        yield ''.join(parse_image(entry))
    caption = g.get('caption')
    if not caption:
        return
    yield f'<div class="cap">{caption}'
    if g.get('credit'):
        yield f'<span class="cred"> {g["credit"]}</span>'
    yield '</div>'
def parse_vid(v):
    '''
    Yield HTML for an NYT video block: a headline (linked to the first
    rendition when available), the promotional media and the summary.
    '''
    if v.get('promotionalMedia'):
        headline = v.get('headline', {}).get('default', '')
        rendition = v.get('renditions')
        if rendition:
            yield f'<div><b><a href="{rendition[0]["url"]}">Video</a>: {headline}</b></div>'
        else:
            yield f'<div><b>{headline}</b></div>'
        yield ''.join(parse_types(v['promotionalMedia']))
    if v.get('promotionalSummary'):
        yield f'<div class="cap">{v["promotionalSummary"]}</div>'
def parse_emb(e):
    '''
    Yield HTML for an embedded-interactive block. Datawrapper embeds are
    replaced with their static full-size PNG; otherwise the promotional
    media (plus headline/note) is rendered.
    '''
    html = e.get('html')
    if html and 'datawrapper.dwcdn.net' in html:
        chart_id = re.search(r'datawrapper.dwcdn.net/(.{5})', html).group(1)
        yield f'<div><img src="https://datawrapper.dwcdn.net/{chart_id}/full.png"></div>'
    elif e.get('promotionalMedia'):
        if e.get('headline'):
            yield f'<div><b>{e["headline"]["default"]}</b></div>'
        yield ''.join(parse_types(e['promotionalMedia']))
        if e.get('note'):
            yield f'<div class="cap">{e["note"]}</div>'
def parse_byline(byl):
    '''
    Yield HTML for a byline block: one bold div per author, then all
    non-empty role fragments wrapped in a single italic div.
    '''
    for author in byl.get('bylines', {}):
        yield f'<div><b>{author["renderedRepresentation"]}</b></div>'
    yield '<div><i>'
    for role in byl.get('role', {}):
        text = ''.join(parse_cnt(role))
        if text.strip():
            yield text
    yield '</i></div>'
def iso_date(x):
    # Format an ISO-8601 timestamp string in local time, e.g.
    # 'Jan 05, 2023 at 07:30 PM'.
    dt = parse_iso8601(x, as_utc=False)
    return dt.strftime('%b %d, %Y at %I:%M %p')
def parse_header(h):
    '''
    Yield HTML for an article header block: label, headline, italic summary,
    then lede media, byline and timestamp in that order.
    '''
    label = h.get('label')
    if label:
        yield f'<div class="lbl">{"".join(parse_types(label))}</div>'
    if h.get('headline'):
        yield ''.join(parse_types(h['headline']))
    summary = h.get('summary')
    if summary:
        yield f'<p><i>{"".join(parse_types(summary))}</i></p>'
    for key in ('ledeMedia', 'byline', 'timestampBlock'):
        part = h.get(key)
        if part:
            yield ''.join(parse_types(part))
def parse_fmt_type(fm):
    '''
    Yield the text of *fm* wrapped in its inline formats: opening tags in
    declaration order, the text, then closing tags in reverse order so the
    markup nests correctly.
    '''
    formats = fm.get('formats', {})
    for fmt in formats:
        kind = fmt.get('__typename', '')
        if kind == 'BoldFormat':
            yield '<strong>'
        if kind == 'ItalicFormat':
            yield '<em>'
        if kind == 'LinkFormat':
            yield f'<a href="{fmt["url"]}">'
    yield fm.get('text', '')
    closers = {'BoldFormat': '</strong>', 'ItalicFormat': '</em>', 'LinkFormat': '</a>'}
    for fmt in reversed(formats):
        tag = closers.get(fmt.get('__typename', ''))
        if tag is not None:
            yield tag
def parse_cnt(cnt):
    '''
    Yield HTML for a generic content node: lists of children are recursed
    (except ``formats`` which triggers inline-format rendering), dict values
    are recursed, and finally a bare string ``text`` field is emitted when no
    formats/content keys are present.
    '''
    for key, val in cnt.items():
        if isinstance(val, list):
            if key == 'formats':
                yield ''.join(parse_fmt_type(cnt))
            else:
                for child in val:
                    yield ''.join(parse_types(child))
        if isinstance(val, dict):
            yield ''.join(parse_types(val))
    text = cnt.get('text')
    if text and 'formats' not in cnt and 'content' not in cnt and isinstance(text, str):
        yield text
def parse_types(x):
    '''
    Dispatch a content node *x* to the appropriate renderer based on its
    GraphQL ``__typename`` and yield the resulting HTML fragments.

    Blocks in the final exclusion set (related links, email signup, ad
    dropzones, audio) are dropped; any other unrecognized typed block falls
    through to generic content rendering.
    '''
    typename = x.get('__typename', '')
    align = ''
    if x.get('textAlign'):
        align = f' style="text-align: {x["textAlign"].lower()};}"'.replace('};"', ';"')
    if 'Header' in typename:
        yield '\n'.join(parse_header(x))
    elif typename.startswith('Heading'):
        # HeadingNBlock -> <hN>; N is guaranteed 1-6 by the regex.
        htag = 'h' + re.match(r'Heading([1-6])Block', typename).group(1)
        yield f'<{htag}{align}>{"".join(parse_cnt(x))}</{htag}>'
    elif typename == 'ParagraphBlock':
        yield f'<p>{"".join(parse_cnt(x))}</p>'
    elif typename in {'DetailBlock', 'TextRunKV'}:
        yield f'<p style="font-size: small;">{"".join(parse_cnt(x))}</p>'
    elif typename == 'BylineBlock':
        yield f'<div class="byl"><br/>{"".join(parse_byline(x))}</div>'
    elif typename == 'LabelBlock':
        yield f'<div class="sc">{"".join(parse_cnt(x))}</div>'
    elif typename == 'BlockquoteBlock':
        yield f'<blockquote>{"".join(parse_cnt(x))}</blockquote>'
    elif typename == 'TimestampBlock':
        yield f'<div class="time">{iso_date(x["timestamp"])}</div>'
    elif typename == 'LineBreakInline':
        yield '<br/>'
    elif typename == 'RuleBlock':
        yield '<hr/>'
    elif typename == 'Image':
        yield ''.join(parse_image(x))
    elif typename == 'GridBlock':
        yield ''.join(parse_img_grid(x))
    elif typename == 'Video':
        yield ''.join(parse_vid(x))
    elif typename == 'EmbeddedInteractive':
        yield ''.join(parse_emb(x))
    elif typename == 'ListBlock':
        yield f'\n<ul>{"".join(parse_cnt(x))}</ul>'
    elif typename == 'ListItemBlock':
        yield f'\n<li>{"".join(parse_cnt(x))}</li>'
    elif typename and typename not in {
        'RelatedLinksBlock',
        'EmailSignupBlock',
        'Dropzone',
        'AudioBlock',
    }:
        # Unknown-but-typed block: render its generic content rather than
        # dropping it silently.
        yield ''.join(parse_cnt(x))
def article_parse(data):
    '''Yield a complete HTML document rendering every content block in *data*.'''
    yield '<html><body>'
    for block in data:
        for fragment in parse_types(block):
            yield fragment
    yield '</body></html>'
def json_to_html(raw):
    '''
    Convert the raw NYT preloaded-data JSON blob into article HTML.

    ``:undefined`` values (invalid JSON emitted by the site) are mapped to
    ``:null`` before parsing. Standard articles live under
    initialData.data.article; a TypeError while indexing means this is a
    live-blog payload under initialState, which takes a different renderer
    and returns early.
    '''
    data = json.loads(raw.replace(':undefined', ':null'))
    # open('/t/raw.json', 'w').write(json.dumps(data, indent=2))
    try:
        data = data['initialData']['data']
    except TypeError:
        # Live coverage payload: different schema, different renderer.
        data = data['initialState']
        return live_json_to_html(data)
    content = data['article']['sprinkledBody']['content']
    return '\n'.join(article_parse(content))
def add_live_item(item, item_type, lines):
    '''
    Append HTML for one live-blog item to *lines* (mutated in place).

    :param item: the item payload; for most types ``item['value']`` holds
                 the content
    :param item_type: one of 'text', 'list', 'bulletedList', 'items',
                      'section', or '' for a raw block dict
    :raises Exception: when the item/type combination is not recognized
    '''
    a = lines.append
    if item_type == 'text':
        a('<p>' + item['value'] + '</p>')
    elif item_type == 'list':
        a('<li>' + item['value'] + '</li>')
    elif item_type == 'bulletedList':
        a('<ul>')
        for x in item['value']:
            a('<li>' + x + '</li>')
        a('</ul>')
    elif item_type == 'items':
        for x in item['value']:
            a('<h5>' + x['subtitle'] + '</h5>')
            add_live_item({'value': x['text']}, 'text', lines)
    elif item_type == 'section':
        for item in item['value']:
            add_live_item(item, item['type'], lines)
    elif item_type == '':
        # Raw block: optional title/image/lead-in followed by exactly one of
        # items/bulletedList/sections.
        b = item
        if b.get('title'):
            a('<h3>' + b['title'] + '</h3>')
        if b.get('imageUrl'):
            a('<div><img src=' + quoteattr(b['imageUrl']) + '/></div>')
        if b.get('leadIn'):
            a('<p>' + b['leadIn'] + '</p>')
        if 'items' in b:
            add_live_item({'value': b['items']}, 'items', lines)
            return
        if 'bulletedList' in b:
            add_live_item({'value': b['bulletedList']}, 'bulletedList', lines)
            return
        if 'sections' in b:
            for section in b['sections']:
                add_live_item({'value': section['section']}, 'section', lines)
            return
        raise Exception('Unknown item: %s' % b)
    else:
        # BUG FIX: this branch previously referenced the undefined name ``b``
        # (only bound in the item_type == '' branch), raising NameError
        # instead of the intended diagnostic.
        raise Exception('Unknown item: %s' % item)
def live_json_to_html(data):
    '''
    Render an NYT live-blog GraphQL cache *data* into a full HTML document.

    Walks ROOT_QUERY to find the root entity, follows the first storyline
    reference to the storyline object, then renders every block of its
    experimentalJsonBlob via add_live_item().
    '''
    # The last dict value in ROOT_QUERY that carries an 'id' wins as root.
    for k, v in data['ROOT_QUERY'].items():
        if isinstance(v, dict) and 'id' in v:
            root = data[v['id']]
    s = data[root['storylines'][0]['id']]
    s = data[s['storyline']['id']]
    title = s['displayName']
    lines = ['<h1>' + escape(title) + '</h1>']
    for b in json.loads(s['experimentalJsonBlob'])['data'][0]['data']:
        b = b['data']
        if isinstance(b, list):
            # A list of typed items; each knows its own 'type'.
            for x in b:
                add_live_item(x, x['type'], lines)
        else:
            # A raw block dict, handled by the '' item_type branch.
            add_live_item(b, '', lines)
    return '<html><body>' + '\n'.join(lines) + '</body></html>'
def extract_html(soup, url):
    '''
    Extract article HTML from a parsed NYT page.

    Interactive articles cannot be rendered statically, so a short notice is
    returned for them. Otherwise the window.__preloadedData JSON is cut out
    of the page's script tag and converted via json_to_html().
    '''
    if '/interactive/' in url:
        return ('<html><body><p><em>'
                'This is an interactive article, which is supposed to be read in a browser.'
                '</p></em></body></html>')
    tag = soup.findAll('script', text=lambda x: x and 'window.__preloadedData' in x)[0]
    text = str(tag)
    # Slice from the first '{' to the trailing ';' of the JS assignment.
    payload = text[text.find('{') : text.rfind(';')].strip().rstrip(';')
    return json_to_html(payload)
def download_url_from_wayback(category, url, br=None):
    '''
    Fetch *url* via the calibre wayback proxy service for the given site
    *category* and return the raw response bytes.

    :param br: optional mechanize browser to reuse; a fresh calibre browser
               is created when None
    '''
    from mechanize import Request
    # The localhost assignment is a development toggle; it is immediately
    # overwritten so the production host is always used.
    host = 'http://localhost:8090'
    host = 'https://wayback1.calibre-ebook.com'
    rq = Request(
        host + '/' + category,
        data=json.dumps({'url': url}),
        headers={'User-Agent': 'calibre', 'Content-Type': 'application/json'},
    )
    if br is None:
        from calibre import browser
        br = browser()
    br.set_handle_gzip(True)
    # Generous 3-minute timeout: the wayback service may have to crawl.
    return br.open_novisit(rq, timeout=3 * 60).read()
def download_url(url=None, br=None):
    # Get the URL from the Wayback machine; defaults to the last
    # command-line argument when no URL is given.
    if url is None:
        url = sys.argv[-1]
    return download_url_from_wayback('nytimes', url, br)
if __name__ == '__main__':
    # CLI helper: render a saved .html page or a raw JSON blob to HTML.
    f = sys.argv[-1]
    with open(f) as fobj:  # was an unclosed open(); use a context manager
        raw = fobj.read()
    if f.endswith('.html'):
        from calibre.ebooks.BeautifulSoup import BeautifulSoup
        soup = BeautifulSoup(raw)
        # BUG FIX: extract_html() requires (soup, url); it was called with a
        # single argument, which always raised TypeError. Pass the file path
        # so the '/interactive/' check has something to inspect.
        print(extract_html(soup, f))
    else:
        print(json_to_html(raw))
| 10,118 | Python | .py | 263 | 30.745247 | 98 | 0.54999 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,806 | natgeo.py | kovidgoyal_calibre/src/calibre/web/site_parsers/natgeo.py | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from pprint import pprint
from calibre import prepare_string_for_xml as escape
from calibre.utils.iso8601 import parse_iso8601
module_version = 1 # needed for live updates
pprint
def extract_json(raw):
    '''
    Pull the article payload out of a NatGeo page's window['__natgeo__']
    script blob. Returns the prism article when present, else the classic
    article, else None.
    '''
    start = raw.find("window['__natgeo__']")
    blob = raw[start : raw.find('</script>', start)]
    page_content = json.loads(blob[blob.find('{') :].rstrip(';'))['page']['content']
    for key in ('prismarticle', 'article'):
        if page_content.get(key):
            return page_content[key]
def parse_contributors(grp):
    '''
    Yield one ``<div class="auth">`` line per contributor group: the group
    title followed by the escaped display names of its contributors.
    '''
    for entry in grp:
        names = ''.join(escape(c['displayName']) for c in entry['contributors'])
        yield '<div class="auth">' + escape(entry['title']) + ' ' + names + '</div>'
def parse_lead_image(media):
    '''
    Yield HTML for a NatGeo lead image: the image (with alt text when a
    description is present) followed by its caption/credit, all wrapped in
    a paragraph. Yields nothing when *media* has no image.
    '''
    if 'image' not in media:
        return
    img = media['image']
    yield '<p>'
    src = escape(img['src'], True)
    if 'dsc' in img:
        yield f'<div><img src="{src}" alt="{escape(img["dsc"], True)}"></div>'
    else:
        yield f'<div><img src="{src}"></div>'
    if 'caption' in media and 'credit' in media:
        yield f'<div class="cap">{media["caption"]}<span class="cred"> {media["credit"]}</span></div>'
    elif 'caption' in media:
        yield f'<div class="cap">{media["caption"]}</div>'
    yield '</p>'
def parse_inline(inl):
    '''
    Yield HTML for a NatGeo inline component: a single Image or an
    ImageGroup, each image wrapped in a paragraph with optional caption and
    credit.
    '''
    content = inl.get('content', {})
    name = content.get('name', '')

    def cap_html(cap):
        # Caption text plus credit span; both parts default to ''.
        return (f'<div class="cap">{cap.get("text", "")}<span '
                f'class="cred"> {cap.get("credit", "")}</span></div>')

    if name == 'Image':
        props = content['props']
        yield '<p>'
        if 'image' in props:
            yield f'<div class="img"><img src="{props["image"]["src"]}"></div>'
        if 'caption' in props:
            yield cap_html(props['caption'])
        yield '</p>'
    if name == 'ImageGroup':
        props = content['props']
        if 'images' in props:
            for img in props['images']:
                yield '<p>'
                if 'src' in img:
                    yield f'<div class="img"><img src="{img["src"]}"></div>'
                if 'caption' in img:
                    yield cap_html(img['caption'])
                yield '</p>'
def parse_cont(content):
    '''
    Yield HTML for every child of *content*: strings pass through verbatim,
    dicts are rendered recursively via parse_body().
    '''
    for child in content.get('content', {}):
        if isinstance(child, str):
            yield child
        elif isinstance(child, dict):
            yield from parse_body(child)
def parse_body(x):
    '''
    Recursively yield HTML for a NatGeo body node.

    A dict with a 'type' becomes a tag of that name (inline components are
    dispatched to parse_inline(); anchors get their href attribute); a list
    is flattened by recursing into its dict members.
    '''
    if isinstance(x, dict):
        if 'type' in x:
            tag = x['type']
            if tag == 'inline':
                yield ''.join(parse_inline(x))
            # 'href' in x.get('attrs', '') is a membership test on the attrs
            # dict (or the empty-string fallback, which never matches).
            elif 'attrs' in x and 'href' in x.get('attrs', ''):
                yield '<' + tag + f' href="{x["attrs"]["href"]}">'
                yield from parse_cont(x)
                yield '</' + tag + '>'
            else:
                yield '<' + tag + '>'
                yield from parse_cont(x)
                yield '</' + tag + '>'
    elif isinstance(x, list):
        for y in x:
            if isinstance(y, dict):
                yield from parse_body(y)
def parse_bdy(item):
    '''
    Yield HTML for one classic-article body item.

    Inline items dispatch on their cmsType (listicle, image, imagegroup,
    pullquote, editorsNote); everything else wraps the raw markup in a tag
    named after the item type, unless the markup already starts with a tag.
    '''
    c = item['cntnt']
    if item.get('type') != 'inline':
        markup = c['mrkup']
        if markup.strip().startswith('<'):
            yield markup
        else:
            yield '<{tag}>{markup}</{tag}>'.format(tag=item['type'], markup=markup)
        return
    cms = c.get('cmsType')
    if cms == 'listicle':
        if 'title' in c:
            yield '<h3>' + escape(c['title']) + '</h3>'
        yield c['text']
    elif cms == 'image':
        yield from parse_lead_image(c)
    elif cms == 'imagegroup':
        for group_img in c['images']:
            yield from parse_lead_image(group_img)
    elif cms == 'pullquote':
        if 'quote' in c:
            yield '<blockquote>' + c['quote'] + '</blockquote>'
    elif cms == 'editorsNote':
        if 'note' in c:
            yield '<blockquote>' + c['note'] + '</blockquote>'
def parse_article(edg):
    '''
    Yield the full HTML of one NatGeo article tile: section, title,
    description, contributors, publish date, read time, lead image and the
    body (prism schema when present, classic 'bdy' schema otherwise).
    '''
    sc = edg['schma']
    yield '<div class="sub">' + escape(edg['sctn']) + '</div>'
    yield '<h1>' + escape(sc['sclTtl']) + '</h1>'
    if sc.get('sclDsc'):
        yield '<div class="byline">' + escape(sc['sclDsc']) + '</div>'
    yield '<p>'
    yield from parse_contributors(edg.get('cntrbGrp', {}))
    # Modification date rendered in local time, e.g. 'January 05, 2023'.
    ts = parse_iso8601(edg['mdDt'], as_utc=False).strftime('%B %d, %Y')
    yield '<div class="time">Published: ' + escape(ts) + '</div>'
    if 'readTime' in edg:
        yield '<div class="time">' + escape(edg['readTime']) + '</div>'
    yield '</p>'
    if edg.get('ldMda', {}).get('cmsType') == 'image':
        yield from parse_lead_image(edg['ldMda'])
    if edg.get('prismData'):
        # New prism schema: body lives in the 'Body' main component; items
        # are either inline dicts or lists of body nodes.
        for main in edg['prismData']['mainComponents']:
            if main['name'] == 'Body':
                for item in main['props']['body']:
                    if isinstance(item, dict):
                        if item.get('type', '') == 'inline':
                            yield ''.join(parse_inline(item))
                    elif isinstance(item, list):
                        for line in item:
                            yield ''.join(parse_body(line))
    elif edg.get('bdy'):
        # Classic schema fallback.
        for item in edg['bdy']:
            yield from parse_bdy(item)
def article_parse(data):
    '''
    Yield a complete HTML document for a NatGeo article payload by walking
    its frames -> modules -> edges and rendering lead images and article
    body tiles.
    '''
    yield '<html><body>'
    for frame in data['frms']:
        if not frame:
            continue
        for mod in frame.get('mods', ()):
            for edge in mod.get('edgs', ()):
                cms = edge.get('cmsType')
                if cms == 'ImmersiveLeadTile' and 'image' in edge.get('cmsImage', {}):
                    yield from parse_lead_image(edge['cmsImage'])
                if cms == 'ArticleBodyTile':
                    yield from parse_article(edge)
    yield '</body></html>'
def extract_html(raw_html):
    # Top-level entry point: pull the JSON payload out of the page source
    # and render it to a single HTML string.
    data = extract_json(raw_html)
    return '\n'.join(article_parse(data))
| 6,472 | Python | .py | 159 | 29.805031 | 89 | 0.492926 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,807 | utils.py | kovidgoyal_calibre/src/calibre/web/fetch/utils.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from calibre.utils.img import blend_on_canvas, image_from_data, image_to_data, scale_image
def rescale_image(data, scale_news_images, compress_news_images_max_size, compress_news_images_auto_size):
    '''
    Optionally scale down and then compress image *data* to fit a size budget.

    :param data: raw image bytes
    :param scale_news_images: (width, height) bound or None to skip scaling
    :param compress_news_images_max_size: explicit budget in KB, or None
    :param compress_news_images_auto_size: when no explicit budget, divide
        pixel count by this to get the byte budget; None disables compression
    :return: the smallest usable encoding; falls back to the original or the
        merely-scaled bytes when compression does not help.
    '''
    orig_data = data  # save it in case compression fails
    img = image_from_data(data)
    orig_w, orig_h = img.width(), img.height()
    if scale_news_images is not None:
        wmax, hmax = scale_news_images
        if wmax < orig_w or hmax < orig_h:
            orig_w, orig_h, data = scale_image(img, wmax, hmax, compression_quality=95)
    if compress_news_images_max_size is None:
        if compress_news_images_auto_size is None:  # not compressing
            return data
        # Auto budget derived from (possibly scaled) pixel count.
        maxsizeb = (orig_w * orig_h)/compress_news_images_auto_size
    else:
        maxsizeb = compress_news_images_max_size * 1024
    if len(data) <= maxsizeb:  # no compression required
        return data
    scaled_data = data  # save it in case compression fails
    quality = 90
    # Step the JPEG quality down until the data fits or quality bottoms out.
    while len(data) >= maxsizeb and quality >= 5:
        data = image_to_data(image_from_data(scaled_data), compression_quality=quality)
        quality -= 5
    if len(data) >= len(scaled_data):  # compression failed
        return orig_data if len(orig_data) <= len(scaled_data) else scaled_data
    if len(data) >= len(orig_data):  # no improvement
        return orig_data
    return data
def prepare_masthead_image(path_to_image, out_path, mi_width, mi_height):
    '''
    Read the image at *path_to_image*, blend it onto a mi_width x mi_height
    canvas and write the result to *out_path*.
    '''
    with open(path_to_image, 'rb') as src:
        raw = src.read()
    canvas = blend_on_canvas(image_from_data(raw), mi_width, mi_height)
    with open(out_path, 'wb') as dest:
        dest.write(image_to_data(canvas))
if __name__ == '__main__':
    # CLI filter: read an image on stdin, rescale for a 768x1024 device with
    # an auto size budget of 8, and write the result to stdout.
    import sys
    # BUG FIX: image bytes must go through the binary buffer objects; the
    # text-mode sys.stdin.read()/sys.stdout.write() would decode/encode the
    # stream and corrupt the image (and image_from_data expects bytes).
    data = sys.stdin.buffer.read()
    sys.stdout.buffer.write(rescale_image(data, (768, 1024), None, 8))
| 1,885 | Python | .py | 39 | 41.974359 | 106 | 0.66503 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,808 | simple.py | kovidgoyal_calibre/src/calibre/web/fetch/simple.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Fetch a webpage and its links recursively. The webpages are saved to disk in
UTF-8 encoding with any charset declarations removed.
'''
import os
import re
import socket
import sys
import threading
import time
import traceback
from base64 import standard_b64decode
from urllib.request import urlopen
from calibre import browser, relpath, unicode_path
from calibre.constants import filesystem_encoding, iswindows
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.config import OptionParser
from calibre.utils.filenames import ascii_filename
from calibre.utils.imghdr import what
from calibre.utils.localization import _
from calibre.utils.logging import Log
from calibre.web.fetch.utils import rescale_image
from polyglot.http_client import responses
from polyglot.urllib import URLError, quote, url2pathname, urljoin, urlparse, urlsplit, urlunparse, urlunsplit
class AbortArticle(Exception):
    # Raised by recipe code to abort downloading the current article only.
    pass
class FetchError(Exception):
    # Raised when fetching a URL fails with a recognized HTTP error status.
    pass
class closing:
    'Context to automatically close something at the end of a block.'
    # Unlike contextlib.closing, any exception raised by close() -- including
    # a missing close() method -- is silently swallowed.
    def __init__(self, thing):
        self.thing = thing
    def __enter__(self):
        return self.thing
    def __exit__(self, *exc_info):
        thing = self.thing
        try:
            thing.close()
        except Exception:
            pass
        return None
def canonicalize_url(url):
    # mechanize does not handle quoting automatically, so percent-encode the
    # path/params/query/fragment of any URL containing whitespace.
    if re.search(r'\s+', url) is None:
        return url
    parts = list(urlparse(url))
    # Indices 2-5 are path, params, query, fragment; scheme/netloc untouched.
    parts[2:6] = [quote(p) for p in parts[2:6]]
    return urlunparse(parts)
bad_url_counter = 0


def basename(url):
    '''
    Return a file name for *url* from the last component of its path.

    Extensionless results become 'index.html'; unparsable URLs get a unique
    'bad_url_N.html' name via the module-level counter.
    '''
    global bad_url_counter
    try:
        fname = os.path.basename(url2pathname(urlsplit(url).path))
    except:
        bad_url_counter += 1
        return 'bad_url_%d.html' % bad_url_counter
    return fname if os.path.splitext(fname)[1] else 'index.html'
def save_soup(soup, target):
    '''
    Serialize *soup* to *target* as UTF-8 HTML.

    Any charset declarations are stripped and a single
    ``<meta charset="utf-8">`` inserted; absolute local file paths in
    img/link/a tags are rewritten relative to the target's directory.
    '''
    # Drop http-equiv style charset declarations ...
    for meta in soup.findAll('meta', content=True):
        if 'charset' in meta['content'].lower():
            meta.extract()
    # ... and HTML5-style <meta charset=...> tags.
    for meta in soup.findAll('meta', charset=True):
        meta.extract()
    head = soup.find('head')
    if head is not None:
        nm = soup.new_tag('meta', charset='utf-8')
        head.insert(0, nm)
    selfdir = os.path.dirname(target)
    # Rewrite absolute paths to already-downloaded local files as relative
    # forward-slash paths so the saved tree is relocatable.
    for tag in soup.findAll(['img', 'link', 'a']):
        for key in ('src', 'href'):
            path = tag.get(key, None)
            if path and os.path.isfile(path) and os.path.exists(path) and os.path.isabs(path):
                tag[key] = unicode_path(relpath(path, selfdir).replace(os.sep, '/'))
    html = str(soup)
    with open(target, 'wb') as f:
        f.write(html.encode('utf-8'))
class response(bytes):
    '''Bytes subclass that carries the final (post-redirect) URL in ``newurl``.'''
    def __new__(cls, *args):
        inst = bytes.__new__(cls, *args)
        inst.newurl = None
        return inst
def default_is_link_wanted(url, tag):
    # Placeholder link filter: always raises so RecursiveFetcher falls back
    # to its regexp-based filtering (see is_link_wanted below).
    raise NotImplementedError()
class RecursiveFetcher:
LINK_FILTER = tuple(re.compile(i, re.IGNORECASE) for i in
('.exe\\s*$', '.mp3\\s*$', '.ogg\\s*$', '^\\s*mailto:', '^\\s*$'))
# ADBLOCK_FILTER = tuple(re.compile(i, re.IGNORECASE) for it in
# (
#
# )
# )
CSS_IMPORT_PATTERN = re.compile(r'\@import\s+url\((.*?)\)', re.IGNORECASE)
default_timeout = socket.getdefaulttimeout() # Needed here as it is used in __del__
    def __init__(self, options, log, image_map={}, css_map={}, job_info=None):
        '''
        :param options: namespace carrying the download options (dir, timeout,
            regexps, recipe hooks, ...); optional hooks are read via getattr
        :param log: calibre Log instance
        :param image_map: shared URL -> local-path cache for images
        :param css_map: shared URL -> local-path cache for stylesheets
        :param job_info: opaque job context passed through to hooks

        NOTE(review): the mutable {} default arguments are shared across
        instances by Python semantics; since they are only stored (never
        mutated here) this appears deliberate, but worth confirming.
        '''
        bd = options.dir
        if not isinstance(bd, str):
            bd = bd.decode(filesystem_encoding)
        self.base_dir = os.path.abspath(os.path.expanduser(bd))
        if not os.path.exists(self.base_dir):
            os.makedirs(self.base_dir)
        self.log = log
        self.verbose = options.verbose
        self.timeout = options.timeout
        self.encoding = options.encoding
        self.browser = options.browser if hasattr(options, 'browser') else browser()
        self.max_recursions = options.max_recursions
        self.match_regexps = [re.compile(i, re.IGNORECASE) for i in options.match_regexps]
        self.filter_regexps = [re.compile(i, re.IGNORECASE) for i in options.filter_regexps]
        self.max_files = options.max_files
        self.delay = options.delay
        self.last_fetch_at = 0.
        self.filemap = {}
        self.imagemap = image_map
        self.imagemap_lock = threading.RLock()
        self.stylemap = css_map
        self.image_url_processor = None
        self.stylemap_lock = threading.RLock()
        self.downloaded_paths = []
        self.current_dir = self.base_dir
        self.files = 0
        # Recipe-supplied hooks; each falls back to a no-op default.
        self.preprocess_regexps = getattr(options, 'preprocess_regexps', [])
        self.remove_tags = getattr(options, 'remove_tags', [])
        self.remove_tags_after = getattr(options, 'remove_tags_after', None)
        self.remove_tags_before = getattr(options, 'remove_tags_before', None)
        self.keep_only_tags = getattr(options, 'keep_only_tags', [])
        self.preprocess_html_ext = getattr(options, 'preprocess_html', lambda soup: soup)
        self.preprocess_raw_html = getattr(options, 'preprocess_raw_html',
                lambda raw, url: raw)
        self.prepreprocess_html_ext = getattr(options, 'skip_ad_pages', lambda soup: None)
        self.postprocess_html_ext = getattr(options, 'postprocess_html', None)
        self.preprocess_image_ext = getattr(options, 'preprocess_image', None)
        self._is_link_wanted = getattr(options, 'is_link_wanted',
                default_is_link_wanted)
        # Image scaling/compression knobs (see calibre.web.fetch.utils).
        self.compress_news_images_max_size = getattr(options, 'compress_news_images_max_size', None)
        self.compress_news_images = getattr(options, 'compress_news_images', False)
        self.compress_news_images_auto_size = getattr(options, 'compress_news_images_auto_size', 16)
        self.scale_news_images = getattr(options, 'scale_news_images', None)
        self.get_delay = getattr(options, 'get_delay', lambda url: self.delay)
        self.download_stylesheets = not options.no_stylesheets
        self.show_progress = True
        self.failed_links = []
        self.job_info = job_info
        self.preloaded_urls = {}
    def get_soup(self, src, url=None):
        '''
        Parse raw page bytes/text into a BeautifulSoup tree, applying the
        recipe's preprocessing regexps and the keep_only/remove_tags
        transformations, then return the result of preprocess_html.
        '''
        nmassage = []
        nmassage.extend(self.preprocess_regexps)
        # Remove comments as they can leave detritus when extracting tags leaves
        # multiple nested comments
        nmassage.append((re.compile(r'<!--.*?-->', re.DOTALL), lambda m: ''))
        usrc = xml_to_unicode(src, self.verbose, strip_encoding_pats=True)[0]
        usrc = self.preprocess_raw_html(usrc, url)
        for pat, repl in nmassage:
            usrc = pat.sub(repl, usrc)
        soup = BeautifulSoup(usrc)
        # skip_ad_pages may return replacement markup for the whole page;
        # when it does, re-run the massage pipeline on it and re-parse.
        replace = self.prepreprocess_html_ext(soup)
        if replace is not None:
            replace = xml_to_unicode(replace, self.verbose, strip_encoding_pats=True)[0]
            for pat, repl in nmassage:
                replace = pat.sub(repl, replace)
            soup = BeautifulSoup(replace)
        if self.keep_only_tags:
            # Build a new <body> containing only the matching tags.
            body = soup.new_tag('body')
            try:
                if isinstance(self.keep_only_tags, dict):
                    self.keep_only_tags = [self.keep_only_tags]
                for spec in self.keep_only_tags:
                    for tag in soup.find('body').findAll(**spec):
                        body.insert(len(body.contents), tag)
                soup.find('body').replaceWith(body)
            except AttributeError:  # soup has no body element
                pass
        def remove_beyond(tag, next):
            # Walk up to <body>, extracting every sibling of *tag* in the
            # given direction ('nextSibling' or 'previousSibling') at each
            # level.
            while tag is not None and getattr(tag, 'name', None) != 'body':
                after = getattr(tag, next)
                while after is not None:
                    ns = getattr(tag, next)
                    after.extract()
                    after = ns
                tag = tag.parent
        if self.remove_tags_after is not None:
            rt = [self.remove_tags_after] if isinstance(self.remove_tags_after, dict) else self.remove_tags_after
            for spec in rt:
                tag = soup.find(**spec)
                remove_beyond(tag, 'nextSibling')
        if self.remove_tags_before is not None:
            rt = [self.remove_tags_before] if isinstance(self.remove_tags_before, dict) else self.remove_tags_before
            for spec in rt:
                tag = soup.find(**spec)
                remove_beyond(tag, 'previousSibling')
        for kwds in self.remove_tags:
            for tag in soup.findAll(**kwds):
                tag.extract()
        return self.preprocess_html_ext(soup)
    def fetch_url(self, url):
        '''
        Fetch *url* and return its content as a ``response`` (bytes subclass
        with the final URL in ``.newurl``).

        Handles, in order: preloaded content, data: URLs (returns plain
        bytes), local file: URLs (no delay applied), and finally remote URLs
        via the browser with inter-request delay, quoting and a single retry
        on temporary network errors. Raises FetchError for recognized HTTP
        error codes.
        '''
        data = None
        q = self.preloaded_urls.pop(url, None)
        if q is not None:
            ans = response(q)
            ans.newurl = url
            return ans
        st = time.monotonic()
        is_data_url = url.startswith('data:')
        if not is_data_url:
            self.log.debug('Fetching', url)
        # Check for a URL pointing to the local filesystem and special case it
        # for efficiency and robustness. Bypasses delay checking as it does not
        # apply to local fetches. Ensures that unicode paths that are not
        # representable in the filesystem_encoding work.
        if is_data_url:
            payload = url.partition(',')[2]
            return standard_b64decode(payload)
        is_local = 0
        if url.startswith('file://'):
            is_local = 7
        elif url.startswith('file:'):
            is_local = 5
        if is_local > 0:
            url = url[is_local:]
            if iswindows and url.startswith('/'):
                url = url[1:]
            with open(url, 'rb') as f:
                data = response(f.read())
                data.newurl = 'file:'+url  # This is what mechanize does for
                # local URLs
            self.log.debug(f'Fetched {url} in {time.monotonic() - st:.1f} seconds')
            return data
        # Honor the per-URL politeness delay relative to the last fetch.
        delta = time.monotonic() - self.last_fetch_at
        delay = self.get_delay(url)
        if delta < delay:
            time.sleep(delay - delta)
        url = canonicalize_url(url)
        open_func = getattr(self.browser, 'open_novisit', self.browser.open)
        try:
            with closing(open_func(url, timeout=self.timeout)) as f:
                # NOTE(review): the second read() presumably drains any bytes
                # left after the first -- a mechanize response quirk; confirm
                # before simplifying to a single read().
                data = response(f.read()+f.read())
                data.newurl = f.geturl()
        except URLError as err:
            if hasattr(err, 'code') and err.code in responses:
                raise FetchError(responses[err.code])
            is_temp = getattr(err, 'worth_retry', False)
            reason = getattr(err, 'reason', None)
            if isinstance(reason, socket.gaierror):
                # see man gai_strerror() for details
                if getattr(reason, 'errno', None) in (socket.EAI_AGAIN, socket.EAI_NONAME):
                    is_temp = True
            if is_temp:  # Connection reset by peer or Name or service not known
                self.log.debug('Temporary error, retrying in 1 second')
                time.sleep(1)
                with closing(open_func(url, timeout=self.timeout)) as f:
                    data = response(f.read()+f.read())
                    data.newurl = f.geturl()
            else:
                raise err
        finally:
            self.last_fetch_at = time.monotonic()
        self.log.debug(f'Fetched {url} in {time.monotonic() - st:f} seconds')
        return data
    def start_fetch(self, url):
        '''
        Kick off a recursive download rooted at *url* by wrapping it in a
        minimal anchor soup and handing it to process_links(); returns the
        local path the page was saved to.
        '''
        soup = BeautifulSoup('<a href="'+url+'" />')
        res = self.process_links(soup, url, 0, into_dir='')
        self.log.debug(url, 'saved to', res)
        return res
def is_link_ok(self, url):
for i in self.__class__.LINK_FILTER:
if i.search(url):
return False
return True
def is_link_wanted(self, url, tag):
try:
return self._is_link_wanted(url, tag)
except NotImplementedError:
pass
except:
return False
if self.filter_regexps:
for f in self.filter_regexps:
if f.search(url):
return False
if self.match_regexps:
for m in self.match_regexps:
if m.search(url):
return True
return False
return True
    def process_stylesheets(self, soup, baseurl):
        '''Download all CSS referenced by ``soup`` into a local
        ``stylesheets`` folder and rewrite the references in the soup to
        point at the local copies.

        Two kinds of references are handled: ``<link type="text/css"
        href=...>`` tags, and ``@import url(...)`` rules inside inline
        ``<style>`` blocks (matched with CSS_IMPORT_PATTERN). Fetched URLs
        are cached in ``self.stylemap`` (guarded by ``stylemap_lock`` —
        presumably because fetches run on multiple threads; confirm with
        caller).
        '''
        diskpath = unicode_path(os.path.join(self.current_dir, 'stylesheets'))
        if not os.path.exists(diskpath):
            os.mkdir(diskpath)
        for c, tag in enumerate(soup.findAll(name=['link', 'style'])):
            try:
                mtype = tag['type']
            except KeyError:
                # A <style> tag with no type attribute is CSS by default;
                # a <link> without one is not.
                mtype = 'text/css' if tag.name.lower() == 'style' else ''
            if mtype.lower() != 'text/css':
                continue
            if tag.has_attr('href'):
                # External stylesheet referenced via <link href=...>
                iurl = tag['href']
                if not urlsplit(iurl).scheme:
                    iurl = urljoin(baseurl, iurl, False)
                found_cached = False
                with self.stylemap_lock:
                    if iurl in self.stylemap:
                        tag['href'] = self.stylemap[iurl]
                        found_cached = True
                if found_cached:
                    continue
                try:
                    data = self.fetch_url(iurl)
                except Exception:
                    self.log.exception('Could not fetch stylesheet ', iurl)
                    continue
                stylepath = os.path.join(diskpath, 'style'+str(c)+'.css')
                with self.stylemap_lock:
                    self.stylemap[iurl] = stylepath
                with open(stylepath, 'wb') as x:
                    x.write(data)
                tag['href'] = stylepath
            else:
                # Inline <style> block: chase @import rules found in its
                # text nodes.
                for ns in tag.findAll(text=True):
                    src = str(ns)
                    m = self.__class__.CSS_IMPORT_PATTERN.search(src)
                    if m:
                        iurl = m.group(1)
                        if not urlsplit(iurl).scheme:
                            iurl = urljoin(baseurl, iurl, False)
                        found_cached = False
                        with self.stylemap_lock:
                            if iurl in self.stylemap:
                                ns.replaceWith(src.replace(m.group(1), self.stylemap[iurl]))
                                found_cached = True
                        if found_cached:
                            continue
                        try:
                            data = self.fetch_url(iurl)
                        except Exception:
                            self.log.exception('Could not fetch stylesheet ', iurl)
                            continue
                        # Bump c so the generated filename does not clash
                        # with the one used for the enclosing tag.
                        c += 1
                        stylepath = os.path.join(diskpath, 'style'+str(c)+'.css')
                        with self.stylemap_lock:
                            self.stylemap[iurl] = stylepath
                        with open(stylepath, 'wb') as x:
                            x.write(data)
                        ns.replaceWith(src.replace(m.group(1), stylepath))
    def rescale_image(self, data):
        # Delegate to the module-level rescale_image() helper (same name,
        # resolved at module scope, so this does not recurse), applying
        # this fetcher's news-image compression settings.
        return rescale_image(data, self.scale_news_images, self.compress_news_images_max_size, self.compress_news_images_auto_size)
    def process_images(self, soup, baseurl):
        '''Download every <img> referenced by ``soup`` into a local
        ``images`` folder, normalize the format, and rewrite the ``src``
        attributes to point at the local files.

        data: URIs are decoded in place; remote URLs are cached in
        ``self.imagemap`` (guarded by ``imagemap_lock``). Non-PNG/JPEG
        images are converted; JPEGs may additionally be recompressed.
        '''
        diskpath = unicode_path(os.path.join(self.current_dir, 'images'))
        if not os.path.exists(diskpath):
            os.mkdir(diskpath)
        c = 0
        for tag in soup.findAll('img', src=True):
            iurl = tag['src']
            if iurl.startswith('data:'):
                # Embedded image: urlopen() handles the data: scheme.
                try:
                    data = urlopen(iurl).read()
                except Exception:
                    self.log.exception('Failed to decode embedded image')
                    continue
            else:
                # Give the recipe a chance to rewrite/veto the image URL.
                if callable(self.image_url_processor):
                    iurl = self.image_url_processor(baseurl, iurl)
                    if not iurl:
                        continue
                if not urlsplit(iurl).scheme:
                    iurl = urljoin(baseurl, iurl, False)
                found_in_cache = False
                with self.imagemap_lock:
                    if iurl in self.imagemap:
                        tag['src'] = self.imagemap[iurl]
                        found_in_cache = True
                if found_in_cache:
                    continue
                try:
                    data = self.fetch_url(iurl)
                    if data == b'GIF89a\x01':
                        # Skip empty GIF files as PIL errors on them anyway
                        continue
                except Exception:
                    self.log.exception('Could not fetch image ', iurl)
                    continue
            c += 1
            fname = ascii_filename('img'+str(c))
            data = self.preprocess_image_ext(data, iurl) if self.preprocess_image_ext is not None else data
            if data is None:
                # preprocess hook vetoed the image
                continue
            itype = what(None, data)
            if itype == 'svg' or (itype is None and b'<svg' in data[:1024]):
                # SVG image
                imgpath = os.path.join(diskpath, fname+'.svg')
                with self.imagemap_lock:
                    self.imagemap[iurl] = imgpath
                with open(imgpath, 'wb') as x:
                    x.write(data)
                tag['src'] = imgpath
            else:
                from calibre.utils.img import image_from_data, image_to_data
                try:
                    # Ensure image is valid
                    img = image_from_data(data)
                    if itype not in {'png', 'jpg', 'jpeg'}:
                        # Convert anything exotic: GIFs to PNG (keeps
                        # transparency), everything else to JPEG.
                        itype = 'png' if itype == 'gif' else 'jpeg'
                        data = image_to_data(img, fmt=itype)
                    if self.compress_news_images and itype in {'jpg','jpeg'}:
                        try:
                            data = self.rescale_image(data)
                        except Exception:
                            self.log.exception('failed to compress image '+iurl)
                    # Moon+ apparently cannot handle .jpeg files
                    if itype == 'jpeg':
                        itype = 'jpg'
                    imgpath = os.path.join(diskpath, fname+'.'+itype)
                    with self.imagemap_lock:
                        self.imagemap[iurl] = imgpath
                    with open(imgpath, 'wb') as x:
                        x.write(data)
                    tag['src'] = imgpath
                except Exception:
                    traceback.print_exc()
                    continue
def absurl(self, baseurl, tag, key, filter=True):
iurl = tag[key]
parts = urlsplit(iurl)
if not parts.netloc and not parts.path and not parts.query:
return None
if not parts.scheme:
iurl = urljoin(baseurl, iurl, False)
if not self.is_link_ok(iurl):
self.log.debug('Skipping invalid link:', iurl)
return None
if filter and not self.is_link_wanted(iurl, tag):
self.log.debug('Filtered link: '+iurl)
return None
return iurl
def normurl(self, url):
parts = list(urlsplit(url))
parts[4] = ''
return urlunsplit(parts)
def localize_link(self, tag, key, path):
parts = urlsplit(tag[key])
suffix = ('#'+parts.fragment) if parts.fragment else ''
tag[key] = path+suffix
def process_return_links(self, soup, baseurl):
for tag in soup.findAll('a', href=True):
iurl = self.absurl(baseurl, tag, 'href')
if not iurl:
continue
nurl = self.normurl(iurl)
if nurl in self.filemap:
self.localize_link(tag, 'href', self.filemap[nurl])
def process_links(self, soup, baseurl, recursion_level, into_dir='links'):
res = ''
diskpath = os.path.join(self.current_dir, into_dir)
if not os.path.exists(diskpath):
os.mkdir(diskpath)
prev_dir = self.current_dir
try:
self.current_dir = diskpath
tags = list(soup.findAll('a', href=True))
for c, tag in enumerate(tags):
if self.show_progress:
print('.', end=' ')
sys.stdout.flush()
sys.stdout.flush()
iurl = self.absurl(baseurl, tag, 'href', filter=recursion_level != 0)
if not iurl:
continue
nurl = self.normurl(iurl)
if nurl in self.filemap:
self.localize_link(tag, 'href', self.filemap[nurl])
continue
if self.files > self.max_files:
return res
linkdir = 'link'+str(c) if into_dir else ''
linkdiskpath = os.path.join(diskpath, linkdir)
if not os.path.exists(linkdiskpath):
os.mkdir(linkdiskpath)
try:
self.current_dir = linkdiskpath
dsrc = self.fetch_url(iurl)
newbaseurl = dsrc.newurl
if len(dsrc) == 0 or \
len(re.compile(b'<!--.*?-->', re.DOTALL).sub(b'', dsrc).strip()) == 0:
raise ValueError('No content at URL %r'%iurl)
if callable(self.encoding):
dsrc = self.encoding(dsrc)
elif self.encoding is not None:
dsrc = dsrc.decode(self.encoding, 'replace')
else:
dsrc = xml_to_unicode(dsrc, self.verbose)[0]
st = time.monotonic()
soup = self.get_soup(dsrc, url=iurl)
self.log.debug(f'Parsed {iurl} in {time.monotonic() - st:.1f} seconds')
base = soup.find('base', href=True)
if base is not None:
newbaseurl = base['href']
self.log.debug('Processing images...')
self.process_images(soup, newbaseurl)
if self.download_stylesheets:
self.process_stylesheets(soup, newbaseurl)
_fname = basename(iurl)
if not isinstance(_fname, str):
_fname.decode('latin1', 'replace')
_fname = _fname.replace('%', '').replace(os.sep, '')
_fname = ascii_filename(_fname)
_fname = os.path.splitext(_fname)[0][:120] + '.xhtml'
res = os.path.join(linkdiskpath, _fname)
self.downloaded_paths.append(res)
self.filemap[nurl] = res
if recursion_level < self.max_recursions:
self.log.debug('Processing links...')
self.process_links(soup, newbaseurl, recursion_level+1)
else:
self.process_return_links(soup, newbaseurl)
self.log.debug('Recursion limit reached. Skipping links in', iurl)
if newbaseurl and not newbaseurl.startswith('/'):
for atag in soup.findAll('a', href=lambda x: x and x.startswith('/')):
atag['href'] = urljoin(newbaseurl, atag['href'], True)
if callable(self.postprocess_html_ext):
soup = self.postprocess_html_ext(soup,
c==0 and recursion_level==0 and not getattr(self, 'called_first', False),
self.job_info)
if c==0 and recursion_level == 0:
self.called_first = True
save_soup(soup, res)
self.localize_link(tag, 'href', res)
except Exception as err:
if isinstance(err, AbortArticle):
raise
self.failed_links.append((iurl, traceback.format_exc()))
self.log.exception('Could not fetch link', iurl)
finally:
self.current_dir = diskpath
self.files += 1
finally:
self.current_dir = prev_dir
if self.show_progress:
print()
return res
def option_parser(usage=_('%prog URL\n\nWhere URL is for example https://google.com')):
    '''Return the command line option parser for the standalone
    web2disk-style fetcher (see main() below). All defaults mirror the
    attributes RecursiveFetcher reads from the parsed options object.'''
    parser = OptionParser(usage=usage)
    parser.add_option('-d', '--base-dir',
                      help=_('Base folder into which URL is saved. Default is %default'),
                      default='.', type='string', dest='dir')
    parser.add_option('-t', '--timeout',
                      help=_('Timeout in seconds to wait for a response from the server. Default: %default s'),
                      default=10.0, type='float', dest='timeout')
    parser.add_option('-r', '--max-recursions', default=1,
                      help=_('Maximum number of levels to recurse i.e. depth of links to follow. Default %default'),
                      type='int', dest='max_recursions')
    parser.add_option('-n', '--max-files', default=sys.maxsize, type='int', dest='max_files',
                      help=_('The maximum number of files to download. This only applies to files from <a href> tags. Default is %default'))
    parser.add_option('--delay', default=0, dest='delay', type='float',
                      help=_('Minimum interval in seconds between consecutive fetches. Default is %default s'))
    parser.add_option('--encoding', default=None,
                      help=_('The character encoding for the websites you are trying to download. The default is to try and guess the encoding.'))
    parser.add_option('--match-regexp', default=[], action='append', dest='match_regexps',
                      help=_('Only links that match this regular expression will be followed. '
                             'This option can be specified multiple times, in which case as long '
                             'as a link matches any one regexp, it will be followed. By default all '
                             'links are followed.'))
    parser.add_option('--filter-regexp', default=[], action='append', dest='filter_regexps',
                      help=_('Any link that matches this regular expression will be ignored.'
                             ' This option can be specified multiple times, in which case as'
                             ' long as any regexp matches a link, it will be ignored. By'
                             ' default, no links are ignored. If both filter regexp and match'
                             ' regexp are specified, then filter regexp is applied first.'))
    parser.add_option('--dont-download-stylesheets', action='store_true', default=False,
                      help=_('Do not download CSS stylesheets.'), dest='no_stylesheets')
    parser.add_option('--verbose', help=_('Show detailed output information. Useful for debugging'),
                      default=False, action='store_true', dest='verbose')
    return parser
def create_fetcher(options, image_map={}, log=None):
    '''Construct a RecursiveFetcher from parsed command line options.

    :param image_map: mapping of image URLs to local files, forwarded to
        the fetcher. The mutable default is safe: it is only ever read.
    :param log: optional Log; if None, a new one is created (DEBUG level
        when --verbose was given).
    '''
    if log is None:
        log = Log(level=Log.DEBUG) if options.verbose else Log()
    # Bug fix: the caller supplied image_map was previously discarded and
    # a fresh empty dict passed to RecursiveFetcher instead.
    return RecursiveFetcher(options, log, image_map=image_map)
def main(args=sys.argv):
    '''Command line entry point: fetch the URL given as the sole
    positional argument. Returns 1 on usage error, None on success.'''
    parser = option_parser()
    opts, leftover = parser.parse_args(args)
    if len(leftover) != 2:
        parser.print_help()
        return 1
    create_fetcher(opts).start_fetch(leftover[1])
# Allow running this module directly as a script; the exit status comes
# from main() (1 on usage error, None/0 on success).
if __name__ == '__main__':
    sys.exit(main())
| 28,019 | Python | .py | 593 | 33.418212 | 146 | 0.543048 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,809 | __init__.py | kovidgoyal_calibre/src/calibre/web/fetch/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
| 87 | Python | .py | 2 | 42.5 | 61 | 0.647059 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,810 | templates.py | kovidgoyal_calibre/src/calibre/web/feeds/templates.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import copy
from lxml import etree, html
from lxml.html.builder import BODY, BR, DIV, H2, H3, HEAD, HR, HTML, IMG, LI, SPAN, STRONG, STYLE, TABLE, TD, TITLE, TR, UL, A
from lxml.html.builder import P as PT
from calibre import isbytestring, strftime
from calibre.utils.localization import _
def attrs(*classes, **attributes):
    '''Build an attribute dict for the lxml.html builders below.

    Positional arguments become the (space separated) CSS ``class``
    value, and a ``rescale`` keyword is translated into the
    ``data-calibre-rescale`` attribute; every other keyword passes
    through untouched.
    '''
    scale = attributes.pop('rescale', None)
    if scale is not None:
        attributes['data-calibre-rescale'] = str(scale)
    if classes:
        attributes['class'] = ' '.join(classes)
    return attributes
# Regular templates
class Template:
    '''Base class for the page templates below.

    Subclasses implement _generate(), which must set ``self.root`` to an
    lxml element tree. generate() normalizes its arguments (bytes are
    decoded as UTF-8, None becomes '') before delegating, and render()
    serializes the tree as HTML or XHTML depending on IS_HTML.
    '''

    IS_HTML = True

    def __init__(self, lang=None):
        self.html_lang = lang

    def generate(self, *args, **kwargs):
        # Ensure a 'style' keyword always exists, then sanitize every
        # value so templates only ever see text.
        kwargs.setdefault('style', '')
        for key in kwargs.keys():
            value = kwargs[key]
            if isbytestring(value):
                value = value.decode('utf-8', 'replace')
                kwargs[key] = value
            if value is None:
                kwargs[key] = ''
        cleaned = []
        for value in args:
            if isbytestring(value):
                value = value.decode('utf-8', 'replace')
            cleaned.append('' if value is None else value)
        self._generate(*cleaned, **kwargs)
        return self

    def render(self, *args, **kwargs):
        if self.IS_HTML:
            return html.tostring(self.root, encoding='utf-8',
                    include_meta_content_type=True, pretty_print=True)
        return etree.tostring(self.root, encoding='utf-8', xml_declaration=True,
                pretty_print=True)
class EmbeddedContent(Template):
    '''Template that renders an article entirely from feed-embedded
    content (no web fetch): whichever of article.content /
    article.summary is longer becomes the body.'''

    def _generate(self, article, style=None, extra_css=None):
        content = article.content if article.content else ''
        summary = article.summary if article.summary else ''
        # Prefer whichever field carries more text.
        text = content if len(content) > len(summary) else summary
        head = HEAD(TITLE(article.title))
        if style:
            head.append(STYLE(style, type='text/css'))
        if extra_css:
            head.append(STYLE(extra_css, type='text/css'))

        if isbytestring(text):
            text = text.decode('utf-8', 'replace')
        elements = html.fragments_fromstring(text)
        self.root = HTML(head,
                BODY(H2(article.title), DIV()))
        div = self.root.find('body').find('div')
        # fragments_fromstring may yield a leading bare string; it becomes
        # the div's text rather than a child element.
        if elements and isinstance(elements[0], str):
            div.text = elements[0]
            elements = list(elements)[1:]
        for elem in elements:
            if hasattr(elem, 'getparent'):
                # Detach from the parse tree before re-parenting.
                elem.getparent().remove(elem)
            else:
                elem = SPAN(elem)
            div.append(elem)
class IndexTemplate(Template):
    '''Top-level periodical index page (XHTML): masthead image, date and
    a list of links to each non-empty section.'''

    def _generate(self, title, masthead, datefmt, feeds, extra_css=None, style=None):
        self.IS_HTML = False
        date = strftime(datefmt)
        head = HEAD(TITLE(title))
        if style:
            head.append(STYLE(style, type='text/css'))
        if extra_css:
            head.append(STYLE(extra_css, type='text/css'))
        ul = UL(attrs('calibre_feed_list'))
        for i, feed in enumerate(feeds):
            # Skip sections with no downloaded articles.
            if len(feed):
                li = LI(A(feed.title, attrs('feed', rescale=120,
                    href='feed_%d/index.html'%i)), id='feed_%d'%i)
                ul.append(li)
        div = DIV(
                PT(IMG(src=masthead,alt="masthead"),style='text-align:center'),
                PT(date, style='text-align:right'),
                ul,
                attrs(rescale=100))
        self.root = HTML(head, BODY(div))
        if self.html_lang:
            self.root.set('lang', self.html_lang)
class FeedTemplate(Template):
    '''Per-section index page: navigation bar, optional section image and
    description, then a list of the downloaded articles with dates and
    truncated summaries.'''

    def get_navbar(self, f, feeds, top=True):
        '''Build the Next/Main menu/Previous navigation DIV for section
        ``f``; empty when there is only one section.'''
        if len(feeds) < 2:
            return DIV()

        navbar = DIV('| ', attrs('calibre_navbar', rescale=70,
            style='text-align:center'))
        if not top:
            # Bottom navbar: the separator rule goes first and carries
            # the leading '| ' text instead of the DIV itself.
            hr = HR()
            navbar.append(hr)
            navbar.text = None
            hr.tail = '| '

        if f+1 < len(feeds):
            link = A(_('Next section'), href='../feed_%d/index.html'%(f+1))
            link.tail = ' | '
            navbar.append(link)
        link = A(_('Main menu'), href="../index.html")
        link.tail = ' | '
        navbar.append(link)
        if f > 0:
            link = A(_('Previous section'), href='../feed_%d/index.html'%(f-1))
            link.tail = ' |'
            navbar.append(link)
        if top:
            navbar.append(HR())
        return navbar

    def _generate(self, f, feeds, cutoff, extra_css=None, style=None):
        from calibre.utils.cleantext import clean_xml_chars
        feed = feeds[f]
        head = HEAD(TITLE(feed.title))
        if style:
            head.append(STYLE(style, type='text/css'))
        if extra_css:
            head.append(STYLE(extra_css, type='text/css'))
        body = BODY()
        body.append(self.get_navbar(f, feeds))

        div = DIV(
                H2(feed.title,
                    attrs('calibre_feed_title', rescale=160)),
                attrs(rescale=100)
                )
        body.append(div)
        if getattr(feed, 'image', None):
            div.append(DIV(IMG(
                alt=feed.image_alt if feed.image_alt else '',
                src=feed.image_url
                ),
                attrs('calibre_feed_image')))
        if getattr(feed, 'description', None):
            d = DIV(clean_xml_chars(feed.description), attrs('calibre_feed_description', rescale=80))
            d.append(BR())
            div.append(d)
        ul = UL(attrs('calibre_article_list'))
        for i, article in enumerate(feed.articles):
            # Only list articles that were actually downloaded.
            if not getattr(article, 'downloaded', False):
                continue
            li = LI(
                    A(article.title, attrs('article', rescale=120,
                                    href=article.url)),
                    SPAN(article.formatted_date, attrs('article_date')),
                    attrs(rescale=100, id='article_%d'%i,
                            style='padding-bottom:0.5em')
                    )
            if article.summary:
                # ``cutoff`` truncates the summary text for display.
                li.append(DIV(clean_xml_chars(cutoff(article.text_summary)),
                    attrs('article_description', rescale=70)))
            ul.append(li)
        div.append(ul)
        div.append(self.get_navbar(f, feeds, top=False))

        self.root = HTML(head, body)
        if self.html_lang:
            self.root.set('lang', self.html_lang)
class NavBarTemplate(Template):
    '''Per-article navigation bar (Next / Section menu / Main menu /
    Previous). The bottom variant additionally records the source URL the
    article was downloaded from.'''

    def _generate(self, bottom, feed, art, number_of_articles_in_feed,
                 two_levels, url, __appname__, prefix='', center=True,
                 extra_css=None, style=None):
        head = HEAD(TITLE('navbar'))
        if style:
            head.append(STYLE(style, type='text/css'))
        if extra_css:
            head.append(STYLE(extra_css, type='text/css'))

        if prefix and not prefix.endswith('/'):
            prefix += '/'
        align = 'center' if center else 'left'

        navbar = DIV(attrs('calibre_navbar', rescale=70,
            style='text-align:'+align))
        if bottom:
            if not url.startswith('file://'):
                # Attribution line with the original download URL.
                navbar.append(HR())
                text = 'This article was downloaded by '
                p = PT(text, STRONG(__appname__), A(url, href=url, rel='calibre-downloaded-from'),
                        style='text-align:left; max-width: 100%; overflow: hidden;')
                p[0].tail = ' from '
                navbar.append(p)
                navbar.append(BR())
                navbar.append(BR())
        else:
            # 'Next' points at the next article, or the next section when
            # this is the last article of the feed.
            next_art = 'feed_%d'%(feed+1) if art == number_of_articles_in_feed - 1 \
                    else 'article_%d'%(art+1)
            up = '../..' if art == number_of_articles_in_feed - 1 else '..'
            href = '%s%s/%s/index.html'%(prefix, up, next_art)
            navbar.text = '| '
            navbar.append(A(_('Next'), href=href))
        href = '%s../index.html#article_%d'%(prefix, art)
        # Each new link sets the previous child's tail to keep the
        # ' | ' separators between entries.
        next(navbar.iterchildren(reversed=True)).tail = ' | '
        navbar.append(A(_('Section menu'), href=href))
        href = '%s../../index.html#feed_%d'%(prefix, feed)
        next(navbar.iterchildren(reversed=True)).tail = ' | '
        navbar.append(A(_('Main menu'), href=href))
        if art > 0 and not bottom:
            href = '%s../article_%d/index.html'%(prefix, art-1)
            next(navbar.iterchildren(reversed=True)).tail = ' | '
            navbar.append(A(_('Previous'), href=href))
        next(navbar.iterchildren(reversed=True)).tail = ' | '
        if not bottom:
            navbar.append(HR())

        self.root = HTML(head, BODY(navbar))
# Touchscreen templates
class TouchscreenIndexTemplate(Template):
    '''Touchscreen variant of the periodical index page: masthead, a
    long-form date heading and a two-column table of sections with
    article counts.'''

    def _generate(self, title, masthead, datefmt, feeds, extra_css=None, style=None):
        self.IS_HTML = False
        # NOTE: datefmt is ignored here; a fixed long-form date is built
        # instead (day-of-month has its leading zero stripped).
        date = '{}, {} {}, {}'.format(strftime('%A'), strftime('%B'), strftime('%d').lstrip('0'), strftime('%Y'))
        masthead_p = etree.Element("p")
        masthead_p.set("style","text-align:center")
        masthead_img = etree.Element("img")
        masthead_img.set("src",masthead)
        masthead_img.set("alt","masthead")
        masthead_p.append(masthead_img)

        head = HEAD(TITLE(title))
        if style:
            head.append(STYLE(style, type='text/css'))
        if extra_css:
            head.append(STYLE(extra_css, type='text/css'))
        toc = TABLE(attrs('toc'),width="100%",border="0",cellpadding="3px")
        for i, feed in enumerate(feeds):
            # Skip sections with no downloaded articles.
            if len(feed):
                tr = TR()
                tr.append(TD(attrs(rescale=120), A(feed.title, href='feed_%d/index.html'%i)))
                tr.append(TD('%s' % len(feed.articles), style="text-align:right"))
                toc.append(tr)
        div = DIV(
                masthead_p,
                H3(attrs('publish_date'),date),
                DIV(attrs('divider')),
                toc)
        self.root = HTML(head, BODY(div))
        if self.html_lang:
            self.root.set('lang', self.html_lang)
class TouchscreenFeedTemplate(Template):
    '''Touchscreen variant of the per-section page: prev/up/next table
    navbars at top and bottom, then one summary block per article.'''

    def _generate(self, f, feeds, cutoff, extra_css=None, style=None):
        from calibre.utils.cleantext import clean_xml_chars

        def trim_title(title,clip=18):
            # Shorten a section title to roughly ``clip`` characters on a
            # word boundary, appending '...' when truncated.
            if len(title)>clip:
                tokens = title.split(' ')
                new_title_tokens = []
                new_title_len = 0
                if len(tokens[0]) > clip:
                    return tokens[0][:clip] + '...'
                for token in tokens:
                    if len(token) + new_title_len < clip:
                        new_title_tokens.append(token)
                        new_title_len += len(token)
                    else:
                        new_title_tokens.append('...')
                        title = ' '.join(new_title_tokens)
                        break
            return title

        self.IS_HTML = False
        feed = feeds[f]

        # Construct the navbar
        navbar_t = TABLE(attrs('touchscreen_navbar'))
        navbar_tr = TR()

        # Previous Section
        link = ''
        if f > 0:
            link = A(attrs('feed_link'),
                     trim_title(feeds[f-1].title),
                     href='../feed_%d/index.html' % int(f-1))
        navbar_tr.append(TD(attrs('feed_prev'),link))

        # Up to Sections
        link = A(_('Sections'), href="../index.html")
        navbar_tr.append(TD(attrs('feed_up'),link))

        # Next Section
        link = ''
        if f < len(feeds)-1:
            link = A(attrs('feed_link'),
                     trim_title(feeds[f+1].title),
                     href='../feed_%d/index.html' % int(f+1))
        navbar_tr.append(TD(attrs('feed_next'),link))
        navbar_t.append(navbar_tr)
        top_navbar = navbar_t
        # Shallow copy is enough: the same row structure is reused at the
        # bottom of the page.
        bottom_navbar = copy.copy(navbar_t)
        # print "\n%s\n" % etree.tostring(navbar_t, pretty_print=True)

        # Build the page
        head = HEAD(TITLE(feed.title))
        if style:
            head.append(STYLE(style, type='text/css'))
        if extra_css:
            head.append(STYLE(extra_css, type='text/css'))
        body = BODY()
        div = DIV(
                top_navbar,
                H2(feed.title, attrs('feed_title'))
                )
        body.append(div)
        if getattr(feed, 'image', None):
            div.append(DIV(IMG(
                alt=feed.image_alt if feed.image_alt else '',
                src=feed.image_url
                ),
                attrs('calibre_feed_image')))
        if getattr(feed, 'description', None):
            d = DIV(clean_xml_chars(feed.description), attrs('calibre_feed_description', rescale=80))
            d.append(BR())
            div.append(d)

        for i, article in enumerate(feed.articles):
            # Only list articles that were actually downloaded.
            if not getattr(article, 'downloaded', False):
                continue

            div_td = DIV(attrs('article_summary'),
                    A(article.title, attrs('summary_headline',rescale=120,
                                    href=article.url)))
            if article.author:
                div_td.append(DIV(article.author,
                    attrs('summary_byline', rescale=100)))
            if article.summary:
                div_td.append(DIV(cutoff(article.text_summary),
                    attrs('summary_text', rescale=100)))
            div.append(div_td)

        div.append(bottom_navbar)
        self.root = HTML(head, body)
        if self.html_lang:
            self.root.set('lang', self.html_lang)
class TouchscreenNavBarTemplate(Template):
    '''Touchscreen variant of the per-article navbar: a four-cell table
    (Previous | Articles | Sections | Next), with a download-attribution
    line above it on bottom navbars.'''

    def _generate(self, bottom, feed, art, number_of_articles_in_feed,
                 two_levels, url, __appname__, prefix='', center=True,
                 extra_css=None, style=None):
        head = HEAD(TITLE('navbar'))
        if style:
            head.append(STYLE(style, type='text/css'))
        if extra_css:
            head.append(STYLE(extra_css, type='text/css'))

        navbar = DIV()
        navbar_t = TABLE(attrs('touchscreen_navbar'))
        navbar_tr = TR()

        if bottom and not url.startswith('file://'):
            # Attribution line with the original download URL.
            navbar.append(HR())
            text = 'This article was downloaded by '
            p = PT(text, STRONG(__appname__), A(url, href=url, rel='calibre-downloaded-from'),
                    style='text-align:left; max-width: 100%; overflow: hidden;')
            p[0].tail = ' from '
            navbar.append(p)
            navbar.append(BR())
        # | Previous
        if art > 0:
            link = A(attrs('article_link'),_('Previous'),href='%s../article_%d/index.html'%(prefix, art-1))
            navbar_tr.append(TD(attrs('article_prev'),link))
        else:
            # Keep the cell so the table layout stays aligned.
            navbar_tr.append(TD(attrs('article_prev'),''))

        # | Articles | Sections |
        link = A(attrs('articles_link'),_('Articles'), href='%s../index.html#article_%d'%(prefix, art))
        navbar_tr.append(TD(attrs('article_articles_list'),link))

        link = A(attrs('sections_link'),_('Sections'), href='%s../../index.html#feed_%d'%(prefix, feed))
        navbar_tr.append(TD(attrs('article_sections_list'),link))

        # | Next
        # Points at the next article, or the next section when this is
        # the feed's last article.
        next_art = 'feed_%d'%(feed+1) if art == number_of_articles_in_feed - 1 \
                else 'article_%d'%(art+1)
        up = '../..' if art == number_of_articles_in_feed - 1 else '..'

        link = A(attrs('article_link'), _('Next'), href='%s%s/%s/index.html'%(prefix, up, next_art))
        navbar_tr.append(TD(attrs('article_next'),link))
        navbar_t.append(navbar_tr)
        navbar.append(navbar_t)
        # print "\n%s\n" % etree.tostring(navbar, pretty_print=True)

        self.root = HTML(head, BODY(navbar))
| 15,799 | Python | .py | 363 | 31.584022 | 126 | 0.534903 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,811 | __init__.py | kovidgoyal_calibre/src/calibre/web/feeds/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Contains the logic for parsing feeds.
'''
import copy
import re
import time
import traceback
from builtins import _
from calibre import force_unicode, replace_entities, strftime
from calibre.utils.cleantext import clean_ascii_chars, clean_xml_chars
from calibre.utils.date import dt_factory, local_tz, utcnow
from calibre.utils.logging import default_log
from polyglot.builtins import string_or_bytes
class Article:
    '''A single article parsed out of a feed.

    Cleans up the title and summary (entity replacement, control
    character stripping, HTML-to-text conversion of the summary) and
    normalizes the publication date into both UTC and local time.
    '''

    def __init__(self, id, title, url, author, summary, published, content):
        from lxml import html
        self.downloaded = False
        self.id = id
        if not title or not isinstance(title, string_or_bytes):
            title = _('Unknown')
        title = force_unicode(title, 'utf-8')
        self._title = clean_xml_chars(title).strip()
        try:
            self._title = replace_entities(self._title)
        except Exception:
            pass
        self._title = clean_ascii_chars(self._title)
        self.url = url
        self.toc_thumbnail = None
        self.internal_toc_entries = ()
        if author and not isinstance(author, str):
            author = author.decode('utf-8', 'replace')
        if summary and not isinstance(summary, str):
            summary = summary.decode('utf-8', 'replace')
        summary = clean_xml_chars(summary) if summary else summary
        # self.summary keeps the (possibly HTML) cleaned summary;
        # text_summary below is the plain-text rendering of it.
        self.summary = summary
        if summary and '<' in summary:
            try:
                s = html.fragment_fromstring(summary, create_parent=True)
                summary = html.tostring(s, method='text', encoding='unicode')
            except Exception:
                # Fix: was a bare ``except:``. Best-effort: an unparsable
                # summary is logged and dropped rather than aborting.
                print('Failed to process article summary, deleting:')
                print(summary.encode('utf-8'))
                traceback.print_exc()
                summary = ''
        self.text_summary = clean_ascii_chars(summary)
        # (Removed a redundant earlier ``self.author = author`` that was
        # always overwritten by this assignment of the decoded value.)
        self.author = author
        self.content = content
        self.date = published
        self.utctime = dt_factory(self.date, assume_utc=True, as_utc=True)
        self.localtime = self.utctime.astimezone(local_tz)
        self._formatted_date = None

    @property
    def formatted_date(self):
        # Lazily formatted local publication date, e.g. " [Mon, 01 Jan 12:00]".
        if self._formatted_date is None:
            self._formatted_date = strftime(" [%a, %d %b %H:%M]",
                    t=self.localtime.timetuple())
        return self._formatted_date

    @formatted_date.setter
    def formatted_date(self, val):
        # Only string overrides are accepted; anything else keeps the
        # lazily computed default.
        if isinstance(val, str):
            self._formatted_date = val

    @property
    def title(self):
        t = self._title
        if not isinstance(t, str) and hasattr(t, 'decode'):
            t = t.decode('utf-8', 'replace')
        return t

    @title.setter
    def title(self, val):
        self._title = clean_ascii_chars(val)

    def __repr__(self):
        return \
('''\
Title       : %s
URL         : %s
Author      : %s
Summary     : %s
Date        : %s
TOC thumb   : %s
Has content : %s
'''%(self.title, self.url, self.author, self.summary[:20]+'...',
    self.localtime.strftime('%a, %d %b, %Y %H:%M'), self.toc_thumbnail,
    bool(self.content)))

    def __str__(self):
        return repr(self)

    def is_same_as(self, other_article):
        '''Two articles are the same when their URLs match (or, lacking
        URLs, when their embedded content matches).'''
        # if self.title != getattr(other_article, 'title', False):
        #     return False
        if self.url:
            return self.url == getattr(other_article, 'url', False)
        return self.content == getattr(other_article, 'content', False)
class Feed:
    '''An ordered collection of Article objects parsed from one feed
    (either a real RSS/Atom feed or a recipe-supplied article list).'''

    def __init__(self, get_article_url=lambda item: item.get('link', None),
            log=default_log):
        '''
        Parse a feed into articles.
        '''
        self.logger = log
        self.get_article_url = get_article_url

    def populate_from_feed(self, feed, title=None, oldest_article=7,
                           max_articles_per_feed=100):
        '''Fill self.articles from a feedparser result object.'''
        entries = feed.entries
        feed = feed.feed
        self.title = feed.get('title', _('Unknown section')) if not title else title
        self.description = feed.get('description', '')
        image = feed.get('image', {})
        self.image_url = image.get('href', None)
        self.image_width = image.get('width', 88)
        self.image_height = image.get('height', 31)
        self.image_alt = image.get('title', '')

        self.articles = []
        self.id_counter = 0
        self.added_articles = []

        self.oldest_article = oldest_article

        for item in entries:
            if len(self.articles) >= max_articles_per_feed:
                break
            self.parse_article(item)

    def populate_from_preparsed_feed(self, title, articles, oldest_article=7,
                                     max_articles_per_feed=100):
        '''Fill self.articles from a recipe-supplied list of article
        dicts (as returned by BasicNewsRecipe.parse_index).'''
        self.title = str(title if title else _('Unknown feed'))
        self.description = ''
        self.image_url = None
        self.articles = []
        self.added_articles = []

        self.oldest_article = oldest_article
        self.id_counter = 0

        for item in articles:
            if len(self.articles) >= max_articles_per_feed:
                break
            self.id_counter += 1
            id = item.get('id', None)
            if not id:
                id = 'internal id#%s'%self.id_counter
            # NOTE(review): a duplicate id aborts the whole loop here
            # (``return``), not just this item — looks like it should be
            # ``continue``; preserved as-is to avoid a behavior change.
            if id in self.added_articles:
                return
            self.added_articles.append(id)
            published = time.gmtime(item.get('timestamp', time.time()))
            title = item.get('title', _('Untitled article'))
            link = item.get('url', None)
            description = item.get('description', '')
            content = item.get('content', '')
            author = item.get('author', '')
            article = Article(id, title, link, author, description,
                              published, content)
            delta = utcnow() - article.utctime
            if delta.days*24*3600 + delta.seconds <= 24*3600*self.oldest_article:
                self.articles.append(article)
            else:
                t = strftime('%a, %d %b, %Y %H:%M', article.localtime.timetuple())
                self.logger.debug('Skipping article %s (%s) from feed %s as it is too old.'%
                        (title, t, self.title))
            d = item.get('date', '')
            article.formatted_date = d

    def parse_article(self, item):
        '''Convert one feedparser entry into an Article and append it,
        unless it is a duplicate or older than self.oldest_article.'''
        self.id_counter += 1
        id = item.get('id', None)
        if not id:
            id = 'internal id#%s'%self.id_counter
        if id in self.added_articles:
            return
        published = None
        for date_field in ('date_parsed', 'published_parsed',
                           'updated_parsed'):
            published = item.get(date_field, None)
            if published is not None:
                break
        if not published:
            # Fall back to parsing the raw date strings with dateutil.
            from dateutil.parser import parse
            for date_field in ('date', 'published', 'updated'):
                try:
                    published = parse(item[date_field]).timetuple()
                except Exception:
                    continue
                break
        if not published:
            published = time.gmtime()
        self.added_articles.append(id)

        title = item.get('title', _('Untitled article'))
        if title.startswith('<'):
            title = re.sub(r'<.+?>', '', title)
        try:
            link = self.get_article_url(item)
        except Exception:
            # Fix: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            self.logger.warning('Failed to get link for %s'%title)
            self.logger.debug(traceback.format_exc())
            link = None

        description = item.get('summary', None)
        author = item.get('author', None)

        content = [i.value for i in item.get('content', []) if i.value]
        content = [i if isinstance(i, str) else i.decode('utf-8', 'replace')
                for i in content]
        content = '\n'.join(content)
        if not content.strip():
            content = None
        if not link and not content:
            return

        article = Article(id, title, link, author, description, published, content)
        delta = utcnow() - article.utctime
        if delta.days*24*3600 + delta.seconds <= 24*3600*self.oldest_article:
            self.articles.append(article)
        else:
            try:
                self.logger.debug('Skipping article %s (%s) from feed %s as it is too old.'%
                        (title, article.localtime.strftime('%a, %d %b, %Y %H:%M'), self.title))
            except UnicodeDecodeError:
                if not isinstance(title, str):
                    title = title.decode('utf-8', 'replace')
                self.logger.debug('Skipping article %s as it is too old'%title)

    def reverse(self):
        self.articles.reverse()

    def __iter__(self):
        return iter(self.articles)

    def __len__(self):
        return len(self.articles)

    def __repr__(self):
        res = [('%20s\n'%'').replace(' ', '_')+repr(art) for art in self]

        return '\n'+'\n'.join(res)+'\n'

    def __str__(self):
        return repr(self)

    def has_embedded_content(self):
        '''Heuristic: True when the feed carries enough inline content
        (>2000 chars per article on average) to render without fetching.'''
        length = 0
        for a in self:
            if a.content or a.summary:
                length += max(len(a.content if a.content else ''),
                              len(a.summary if a.summary else ''))

        return length > 2000 * len(self)

    def has_article(self, article):
        for a in self:
            if a.is_same_as(article):
                return True
        return False

    def find(self, article):
        '''Return the index of ``article`` (matched via is_same_as), or -1.'''
        for i, a in enumerate(self):
            if a.is_same_as(article):
                return i
        return -1

    def remove(self, article):
        # Bug fix: this called the nonexistent self.index(); find()
        # performs the intended is_same_as() lookup and returns -1 when
        # the article is absent.
        i = self.find(article)
        if i > -1:
            self.articles[i:i+1] = []

    def remove_article(self, article):
        '''Remove by object identity/equality; silently ignores absence.'''
        try:
            self.articles.remove(article)
        except ValueError:
            pass
class FeedCollection(list):
    '''A list of non-empty Feeds with cross-feed duplicate articles
    removed on construction (kept in self.duplicates so they can be
    restored later as links to the first occurrence).'''

    def __init__(self, feeds):
        list.__init__(self, [f for f in feeds if len(f.articles) > 0])
        found_articles = set()
        duplicates = set()

        def in_set(s, a):
            # Linear scan using is_same_as() — Articles are compared
            # semantically, not by hash.
            for x in s:
                if a.is_same_as(x):
                    return x
            return None

        print('#feeds', len(self))
        print(list(map(len, self)))
        for f in self:
            dups = []
            for a in f:
                first = in_set(found_articles, a)
                if first is not None:
                    dups.append(a)
                    duplicates.add((first, f))
                else:
                    found_articles.add(a)
            # Remove after the scan so we do not mutate f while iterating.
            for x in dups:
                f.articles.remove(x)

        self.duplicates = duplicates
        print(len(duplicates))
        print(list(map(len, self)))
        # raise

    def find_article(self, article):
        # Returns (feed_index, article_index) for an article located by
        # identity, or None when absent.
        for j, f in enumerate(self):
            for i, a in enumerate(f):
                if a is article:
                    return (j, i)

    def restore_duplicates(self):
        '''Re-add each removed duplicate to its original feed, with its
        URL rewritten to point at the kept copy's local page.'''
        temp = []
        for article, feed in self.duplicates:
            art = copy.deepcopy(article)
            j, i = self.find_article(article)
            art.url = '../feed_%d/article_%d/index.html'%(j, i)
            temp.append((feed, art))
        for feed, art in temp:
            feed.articles.append(art)
def feed_from_xml(raw_xml, title=None, oldest_article=7,
                  max_articles_per_feed=100,
                  get_article_url=lambda item: item.get('link', None),
                  log=default_log):
    '''Parse raw RSS/Atom bytes into a populated Feed object.'''
    from calibre.web.feeds.feedparser import parse
    # Handle unclosed escaped entities. They trip up feedparser and HBR for one
    # generates them
    raw_xml = re.sub(br'(&#\d+)([^0-9;])', br'\1;\2', raw_xml)
    feed = parse(raw_xml)
    pfeed = Feed(get_article_url=get_article_url, log=log)
    pfeed.populate_from_feed(feed, title=title,
                            oldest_article=oldest_article,
                            max_articles_per_feed=max_articles_per_feed)
    return pfeed
def feeds_from_index(index, oldest_article=7, max_articles_per_feed=100,
                     log=default_log):
    '''
    Build :class:`Feed` objects from a parsed recipe index.

    @param index: A parsed index as returned by L{BasicNewsRecipe.parse_index}.
    @return: A list of L{Feed} objects.
    @rtype: list
    '''
    result = []
    for feed_title, feed_articles in index:
        feed = Feed(log=log)
        feed.populate_from_preparsed_feed(
            feed_title, feed_articles, oldest_article=oldest_article,
            max_articles_per_feed=max_articles_per_feed)
        result.append(feed)
    return result
| 12,640 | Python | .py | 319 | 29.178683 | 105 | 0.555266 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,812 | news.py | kovidgoyal_calibre/src/calibre/web/feeds/news.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Defines various abstract base classes that can be subclassed to create powerful news fetching recipes.
'''
__docformat__ = "restructuredtext en"
import io
import os
import re
import sys
import time
import traceback
from collections import defaultdict
from contextlib import closing
from urllib.parse import urlparse, urlsplit
from calibre import __appname__, as_unicode, browser, force_unicode, iswindows, preferred_encoding, random_user_agent, strftime
from calibre.ebooks.BeautifulSoup import BeautifulSoup, CData, NavigableString, Tag
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.ebooks.metadata.toc import TOC
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.date import now as nowf
from calibre.utils.icu import numeric_sort_key
from calibre.utils.img import add_borders_to_image, image_to_data, save_cover_data_to
from calibre.utils.localization import _, canonicalize_lang, ngettext
from calibre.utils.logging import ThreadSafeWrapper
from calibre.utils.threadpool import NoResultsPending, ThreadPool, WorkRequest
from calibre.web import Recipe
from calibre.web.feeds import Feed, feed_from_xml, feeds_from_index, templates
from calibre.web.fetch.simple import AbortArticle, RecursiveFetcher
from calibre.web.fetch.simple import option_parser as web2disk_option_parser
from calibre.web.fetch.utils import prepare_masthead_image
from polyglot.builtins import string_or_bytes
def classes(classes):
    '''BeautifulSoup attrs matcher: matches tags whose class attribute
    shares at least one name with the space-separated `classes` string.'''
    wanted = frozenset(classes.split(' '))
    return {'attrs': {
        'class': lambda x: x and frozenset(x.split()).intersection(wanted)}}
def prefixed_classes(classes):
    '''BeautifulSoup attrs matcher: matches tags that have at least one
    class name starting with any of the space-separated prefixes in
    `classes`.'''
    prefixes = tuple(frozenset(classes.split(' ')))

    def matcher(x):
        if not x:
            return False
        # str.startswith accepts a tuple of alternatives.
        return any(name.startswith(prefixes) for name in frozenset(x.split()))
    return {'attrs': {'class': matcher}}
class LoginFailed(ValueError):
    '''Raised when logging in to a news source with the supplied credentials fails.'''
    pass
class DownloadDenied(ValueError):
    '''Raised when the server denies access to an article or resource.'''
    pass
class BasicNewsRecipe(Recipe):
'''
Base class that contains logic needed in all recipes. By overriding
progressively more of the functionality in this class, you can make
progressively more customized/powerful recipes. For a tutorial introduction
to creating recipes, see :doc:`news`.
'''
#: The title to use for the e-book
title = _('Unknown News Source')
#: A couple of lines that describe the content this recipe downloads.
#: This will be used primarily in a GUI that presents a list of recipes.
description = ''
#: The author of this recipe
__author__ = __appname__
#: Minimum calibre version needed to use this recipe
requires_version = (0, 6, 0)
#: The language that the news is in. Must be an ISO-639 code either
#: two or three characters long
language = 'und'
#: Maximum number of articles to download from each feed. This is primarily
#: useful for feeds that don't have article dates. For most feeds, you should
#: use :attr:`BasicNewsRecipe.oldest_article`
max_articles_per_feed = 100
#: Oldest article to download from this news source. In days.
oldest_article = 7.0
#: Number of levels of links to follow on article webpages
recursions = 0
#: The default delay between consecutive downloads in seconds. The argument may be a
#: floating point number to indicate a more precise time. See :meth:`get_url_specific_delay`
#: to implement per URL delays.
delay = 0
#: Publication type
#: Set to newspaper, magazine or blog. If set to None, no publication type
#: metadata will be written to the opf file.
publication_type = 'unknown'
#: Number of simultaneous downloads. Set to 1 if the server is picky.
#: Automatically reduced to 1 if :attr:`BasicNewsRecipe.delay` > 0
simultaneous_downloads = 5
#: Timeout for fetching files from server in seconds
timeout = 120.0
#: The format string for the date shown on the first page.
#: By default: Day_Name, Day_Number Month_Name Year
timefmt = ' [%a, %d %b %Y]'
#: List of feeds to download.
#: Can be either ``[url1, url2, ...]`` or ``[('title1', url1), ('title2', url2),...]``
feeds = None
#: Max number of characters in the short description
summary_length = 500
#: Convenient flag to disable loading of stylesheets for websites
#: that have overly complex stylesheets unsuitable for conversion
#: to e-book formats.
#: If True stylesheets are not downloaded and processed
no_stylesheets = False
#: Convenient flag to strip all JavaScript tags from the downloaded HTML
remove_javascript = True
#: If True the GUI will ask the user for a username and password
#: to use while downloading.
#: If set to "optional" the use of a username and password becomes optional
needs_subscription = False
#: If True the navigation bar is center aligned, otherwise it is left aligned
center_navbar = True
#: Specify an override encoding for sites that have an incorrect
#: charset specification. The most common being specifying ``latin1`` and
#: using ``cp1252``. If None, try to detect the encoding. If it is a
#: callable, the callable is called with two arguments: The recipe object
#: and the source to be decoded. It must return the decoded source.
encoding = None
#: Normally we try to guess if a feed has full articles embedded in it
#: based on the length of the embedded content. If `None`, then the
#: default guessing is used. If `True` then the we always assume the feeds has
#: embedded content and if `False` we always assume the feed does not have
#: embedded content.
use_embedded_content = None
#: Set to True and implement :meth:`get_obfuscated_article` to handle
#: websites that try to make it difficult to scrape content.
articles_are_obfuscated = False
#: Reverse the order of articles in each feed
reverse_article_order = False
#: Automatically extract all the text from downloaded article pages. Uses
#: the algorithms from the readability project. Setting this to True, means
#: that you do not have to worry about cleaning up the downloaded HTML
#: manually (though manual cleanup will always be superior).
auto_cleanup = False
#: Specify elements that the auto cleanup algorithm should never remove.
#: The syntax is a XPath expression. For example::
#:
#: auto_cleanup_keep = '//div[@id="article-image"]' will keep all divs with
#: id="article-image"
#: auto_cleanup_keep = '//*[@class="important"]' will keep all elements
#: with class="important"
#: auto_cleanup_keep = '//div[@id="article-image"]|//span[@class="important"]'
#: will keep all divs with id="article-image" and spans
#: with class="important"
#:
auto_cleanup_keep = None
#: Specify any extra :term:`CSS` that should be added to downloaded :term:`HTML` files.
#: It will be inserted into `<style>` tags, just before the closing
#: `</head>` tag thereby overriding all :term:`CSS` except that which is
#: declared using the style attribute on individual :term:`HTML` tags.
#: Note that if you want to programmatically generate the extra_css override
#: the :meth:`get_extra_css()` method instead.
#: For example::
#:
#: extra_css = '.heading { font: serif x-large }'
#:
extra_css = None
#: If True empty feeds are removed from the output.
#: This option has no effect if parse_index is overridden in
#: the sub class. It is meant only for recipes that return a list
#: of feeds using `feeds` or :meth:`get_feeds`. It is also used if you use
#: the ignore_duplicate_articles option.
remove_empty_feeds = False
#: List of regular expressions that determines which links to follow.
#: If empty, it is ignored. Used only if is_link_wanted is
#: not implemented. For example::
#:
#: match_regexps = [r'page=[0-9]+']
#:
#: will match all URLs that have `page=some number` in them.
#:
#: Only one of :attr:`BasicNewsRecipe.match_regexps` or
#: :attr:`BasicNewsRecipe.filter_regexps` should be defined.
match_regexps = []
#: List of regular expressions that determines which links to ignore.
#: If empty it is ignored. Used only if is_link_wanted is not
#: implemented. For example::
#:
#: filter_regexps = [r'ads\.doubleclick\.net']
#:
#: will remove all URLs that have `ads.doubleclick.net` in them.
#:
#: Only one of :attr:`BasicNewsRecipe.match_regexps` or
#: :attr:`BasicNewsRecipe.filter_regexps` should be defined.
filter_regexps = []
#: Recipe specific options to control the conversion of the downloaded
#: content into an e-book. These will override any user or plugin specified
#: values, so only use if absolutely necessary. For example::
#:
#: conversion_options = {
#: 'base_font_size' : 16,
#: 'linearize_tables' : True,
#: }
#:
conversion_options = {}
#: List of tags to be removed. Specified tags are removed from downloaded HTML.
#: A tag is specified as a dictionary of the form::
#:
#: {
#: name : 'tag name', #e.g. 'div'
#: attrs : a dictionary, #e.g. {'class': 'advertisment'}
#: }
#:
#: All keys are optional. For a full explanation of the search criteria, see
#: `Beautiful Soup <https://www.crummy.com/software/BeautifulSoup/bs4/doc/#searching-the-tree>`__
#: A common example::
#:
#: remove_tags = [dict(name='div', class_='advert')]
#:
#: This will remove all `<div class="advert">` tags and all
#: their children from the downloaded :term:`HTML`.
remove_tags = []
#: Remove all tags that occur after the specified tag.
#: For the format for specifying a tag see :attr:`BasicNewsRecipe.remove_tags`.
#: For example::
#:
#: remove_tags_after = [dict(id='content')]
#:
#: will remove all
#: tags after the first element with `id="content"`.
remove_tags_after = None
#: Remove all tags that occur before the specified tag.
#: For the format for specifying a tag see :attr:`BasicNewsRecipe.remove_tags`.
#: For example::
#:
#: remove_tags_before = dict(id='content')
#:
#: will remove all
#: tags before the first element with `id="content"`.
remove_tags_before = None
#: List of attributes to remove from all tags.
#: For example::
#:
#: remove_attributes = ['style', 'font']
remove_attributes = []
#: Keep only the specified tags and their children.
#: For the format for specifying a tag see :attr:`BasicNewsRecipe.remove_tags`.
#: If this list is not empty, then the `<body>` tag will be emptied and re-filled with
#: the tags that match the entries in this list. For example::
#:
#: keep_only_tags = [dict(id=['content', 'heading'])]
#:
#: will keep only tags that have an `id` attribute of `"content"` or `"heading"`.
keep_only_tags = []
#: List of :term:`regexp` substitution rules to run on the downloaded :term:`HTML`.
#: Each element of the
#: list should be a two element tuple. The first element of the tuple should
#: be a compiled regular expression and the second a callable that takes
#: a single match object and returns a string to replace the match. For example::
#:
#: preprocess_regexps = [
#: (re.compile(r'<!--Article ends here-->.*</body>', re.DOTALL|re.IGNORECASE),
#: lambda match: '</body>'),
#: ]
#:
#: will remove everything from `<!--Article ends here-->` to `</body>`.
preprocess_regexps = []
#: The CSS that is used to style the templates, i.e., the navigation bars and
#: the Tables of Contents. Rather than overriding this variable, you should
#: use `extra_css` in your recipe to customize look and feel.
template_css = '''
.article_date {
color: gray; font-family: monospace;
}
.article_description {
text-indent: 0pt;
}
a.article {
font-weight: bold; text-align:left;
}
a.feed {
font-weight: bold;
}
.calibre_navbar {
font-family:monospace;
}
'''
#: By default, calibre will use a default image for the masthead (Kindle only).
#: Override this in your recipe to provide a URL to use as a masthead.
masthead_url = None
#: By default, the cover image returned by get_cover_url() will be used as
#: the cover for the periodical. Overriding this in your recipe instructs
#: calibre to render the downloaded cover into a frame whose width and height
#: are expressed as a percentage of the downloaded cover.
#: cover_margins = (10, 15, '#ffffff') pads the cover with a white margin
#: 10px on the left and right, 15px on the top and bottom.
#: Color names are defined `here <https://www.imagemagick.org/script/color.php>`_.
#: Note that for some reason, white does not always work in Windows. Use
#: #ffffff instead
cover_margins = (0, 0, '#ffffff')
#: Set to a non empty string to disable this recipe.
#: The string will be used as the disabled message
recipe_disabled = None
#: Ignore duplicates of articles that are present in more than one section.
#: A duplicate article is an article that has the same title and/or URL.
#: To ignore articles with the same title, set this to::
#:
#: ignore_duplicate_articles = {'title'}
#:
#: To use URLs instead, set it to::
#:
#: ignore_duplicate_articles = {'url'}
#:
#: To match on title or URL, set it to::
#:
#: ignore_duplicate_articles = {'title', 'url'}
ignore_duplicate_articles = None
# The following parameters control how the recipe attempts to minimize
# JPEG image sizes
#: Set this to False to ignore all scaling and compression parameters and
#: pass images through unmodified. If True and the other compression
#: parameters are left at their default values, JPEG images will be scaled to fit
#: in the screen dimensions set by the output profile and compressed to size at
#: most (w * h)/16 where w x h are the scaled image dimensions.
compress_news_images = False
#: The factor used when auto compressing JPEG images. If set to None,
#: auto compression is disabled. Otherwise, the images will be reduced in size to
#: (w * h)/compress_news_images_auto_size bytes if possible by reducing
#: the quality level, where w x h are the image dimensions in pixels.
#: The minimum JPEG quality will be 5/100 so it is possible this constraint
#: will not be met. This parameter can be overridden by the parameter
#: compress_news_images_max_size which provides a fixed maximum size for images.
#: Note that if you enable scale_news_images_to_device then the image will
#: first be scaled and then its quality lowered until its size is less than
#: (w * h)/factor where w and h are now the *scaled* image dimensions. In
#: other words, this compression happens after scaling.
compress_news_images_auto_size = 16
#: Set JPEG quality so images do not exceed the size given (in KBytes).
#: If set, this parameter overrides auto compression via compress_news_images_auto_size.
#: The minimum JPEG quality will be 5/100 so it is possible this constraint
#: will not be met.
compress_news_images_max_size = None
#: Rescale images to fit in the device screen dimensions set by the output profile.
#: Ignored if no output profile is set.
scale_news_images_to_device = True
#: Maximum dimensions (w,h) to scale images to. If scale_news_images_to_device is True
#: this is set to the device screen dimensions set by the output profile unless
#: there is no profile set, in which case it is left at whatever value it has been
#: assigned (default None).
scale_news_images = None
#: If set to True then links in downloaded articles that point to other downloaded articles are
#: changed to point to the downloaded copy of the article rather than its original web URL. If you
#: set this to True, you might also need to implement :meth:`canonicalize_internal_url` to work
#: with the URL scheme of your particular website.
resolve_internal_links = False
#: Specify options specific to this recipe. These will be available for the user to customize
#: in the Advanced tab of the Fetch News dialog or at the ebook-convert command line. The options
#: are specified as a dictionary mapping option name to metadata about the option. For example::
#:
#: recipe_specific_options = {
#: 'edition_date': {
#: 'short': 'The issue date to download',
#: 'long': 'Specify a date in the format YYYY-mm-dd to download the issue corresponding to that date',
#: 'default': 'current',
#: }
#: }
#:
#: When the recipe is run, self.recipe_specific_options will be a dict mapping option name to the option value
#: specified by the user. When the option is unspecified by the user, it will have the value specified by 'default'.
#: If no default is specified, the option will not be in the dict at all, when unspecified by the user.
recipe_specific_options = None
#: The simulated browser engine to use when downloading from servers. The default is to use the Python mechanize
#: browser engine, which supports logging in. However, if you don't need logging in, consider changing this
#: to either 'webengine' which uses an actual Chromium browser to do the network requests or 'qt' which
#: uses the Qt Networking backend. Both 'webengine' and 'qt' support HTTP/2, which mechanize does not and
#: are thus harder to fingerprint for bot protection services.
browser_type = 'mechanize'
#: Set to False if you do not want to use gzipped transfers with the mechanize browser.
#: Note that some old servers flake out with gzip.
handle_gzip = True
# See the built-in recipes for examples of these settings.
def short_title(self):
    # Recipe title coerced to unicode using the system's preferred encoding.
    return force_unicode(self.title, preferred_encoding)
def is_link_wanted(self, url, tag):
    '''
    Return True if the link should be followed or False otherwise. By
    default, raises NotImplementedError which causes the downloader to
    ignore it.

    :param url: The URL to be followed
    :param tag: The tag from which the URL was derived
    :raises NotImplementedError: by default, so that match_regexps /
        filter_regexps are used instead.
    '''
    raise NotImplementedError()
def get_extra_css(self):
    '''
    By default returns `self.extra_css`. Override if you want to programmatically generate the
    extra_css.

    :return: A CSS string, or None.
    '''
    return self.extra_css
def get_cover_url(self):
    '''
    Return a :term:`URL` to the cover image for this issue or `None`.
    By default it returns the value of the member `self.cover_url` which
    is normally `None`. If you want your recipe to download a cover for the e-book
    override this method in your subclass, or set the member variable `self.cover_url`
    before this method is called.
    '''
    # getattr with a default: cover_url may never have been assigned.
    return getattr(self, 'cover_url', None)
def get_masthead_url(self):
    '''
    Return a :term:`URL` to the masthead image for this issue or `None`.
    By default it returns the value of the member `self.masthead_url` which
    is normally `None`. If you want your recipe to download a masthead for the e-book
    override this method in your subclass, or set the member variable `self.masthead_url`
    before this method is called.
    Masthead images are used in Kindle MOBI files.
    '''
    # getattr with a default: masthead_url may never have been assigned.
    return getattr(self, 'masthead_url', None)
def get_feeds(self):
    '''
    Return a list of :term:`RSS` feeds to fetch for this profile. Each element of the list
    must be a 2-element tuple of the form (title, url). If title is None or an
    empty string, the title from the feed is used. This method is useful if your recipe
    needs to do some processing to figure out the list of feeds to download. If
    so, override in your subclass.
    '''
    if not self.feeds:
        raise NotImplementedError()
    # In test mode, only the first self.test[0] feeds are downloaded.
    return self.feeds[:self.test[0]] if self.test else self.feeds
def get_url_specific_delay(self, url):
    '''
    Return the delay in seconds before downloading this URL. If you want to programmatically
    determine the delay for the specified URL, override this method in your subclass, returning
    self.delay by default for URLs you do not want to affect.

    :param url: The URL about to be downloaded.
    :return: A floating point number, the delay in seconds.
    '''
    return self.delay
@classmethod
def print_version(cls, url):
    '''
    Take a `url` pointing to the webpage with article content and return the
    :term:`URL` pointing to the print version of the article. By default does
    nothing. For example::

        def print_version(self, url):
            return url + '?&pagewanted=print'

    :raises NotImplementedError: by default, signalling that no print
        version is available.
    '''
    raise NotImplementedError()
@classmethod
def image_url_processor(cls, baseurl, url):
    '''
    Perform some processing on image urls (perhaps removing size restrictions for
    dynamically generated images, etc.) and return the processed URL. Return None
    or an empty string to skip fetching the image.
    '''
    # Default: pass the URL through unchanged.
    return url
def preprocess_image(self, img_data, image_url):
    '''
    Perform some processing on downloaded image data. This is called on the raw
    data before any resizing is done. Must return the processed raw data. Return
    None to skip the image.
    '''
    # Default: pass the raw image bytes through unchanged.
    return img_data
def get_browser(self, *args, **kwargs):
    '''
    Return a browser instance used to fetch documents from the web. By default
    it returns a `mechanize <https://mechanize.readthedocs.io/en/latest/>`_
    browser instance that supports cookies, ignores robots.txt, handles
    refreshes and has a random common user agent.

    To customize the browser override this method in your sub-class as::

        def get_browser(self, *a, **kw):
            br = super().get_browser(*a, **kw)
            # Add some headers
            br.addheaders += [
                ('My-Header', 'one'),
                ('My-Header2', 'two'),
            ]
            # Set some cookies
            br.set_cookie('name', 'value')
            br.set_cookie('name2', 'value2', domain='.mydomain.com')
            # Make a POST request with some data
            br.open('https://someurl.com', {'username': 'def', 'password': 'pwd'}).read()
            # Do a login via a simple web form (only supported with mechanize browsers)
            if self.username is not None and self.password is not None:
                br.open('https://www.nytimes.com/auth/login')
                br.select_form(name='login')
                br['USERID']   = self.username
                br['PASSWORD'] = self.password
                br.submit()
            return br
    '''
    if 'user_agent' not in kwargs:
        # More and more news sites are serving JPEG XR images to IE
        # Reuse the same user agent across calls (cached on self) so all
        # requests in this download session look like one browser.
        ua = getattr(self, 'last_used_user_agent', None) or self.calibre_most_common_ua or random_user_agent(allow_ie=False)
        kwargs['user_agent'] = self.last_used_user_agent = ua
    self.log('Using user agent:', kwargs['user_agent'])
    if self.browser_type != 'mechanize':
        # Alternate engines ('qt' / 'webengine') selected via browser_type.
        from calibre.scraper.qt import Browser, WebEngineBrowser
        return {'qt': Browser, 'webengine': WebEngineBrowser}[self.browser_type](
            user_agent=kwargs['user_agent'], verify_ssl_certificates=kwargs.get('verify_ssl_certificates', False))
    br = browser(*args, **kwargs)
    br.addheaders += [('Accept', '*/*')]
    if self.handle_gzip:
        br.set_handle_gzip(True)
    return br
def clone_browser(self, br):
    '''
    Clone the browser br. Cloned browsers are used for multi-threaded
    downloads, since mechanize is not thread safe. The default cloning
    routines should capture most browser customization, but if you do
    something exotic in your recipe, you should override this method in
    your recipe and clone manually.

    Cloned browser instances use the same, thread-safe CookieJar by
    default, unless you have customized cookie handling.
    '''
    clone = getattr(br, 'clone_browser', None)
    if callable(clone):
        return clone()
    # Browser cannot clone itself (recipe using something exotic), so
    # fall back to building a fresh one.
    return self.get_browser()
@property
def cloned_browser(self):
    # A browser safe to use from a worker thread: either a brand new
    # default browser (cheaper than cloning when the recipe does not
    # customize get_browser) or an actual clone of self.browser.
    if hasattr(self.get_browser, 'is_base_class_implementation') and self.browser_type == 'mechanize':
        # We are using the default get_browser, which means no need to
        # clone
        br = BasicNewsRecipe.get_browser(self)
    else:
        br = self.clone_browser(self.browser)
    return br
def get_article_url(self, article):
    '''
    Override in a subclass to customize extraction of the :term:`URL` that points
    to the content for each article. Return the
    article URL. It is called with `article`, an object representing a parsed article
    from a feed. See `feedparser <https://pythonhosted.org/feedparser/>`_.
    By default it looks for the original link (for feeds syndicated via a
    service like FeedBurner or Pheedo) and if found,
    returns that or else returns
    `article.link <https://pythonhosted.org/feedparser/reference-entry-link.html>`_.
    '''
    # Syndication services (FeedBurner, Pheedo, ...) store the original
    # link in a key ending with _origlink; prefer that when it is an
    # absolute http(s) URL.
    for key in article.keys():
        if key.endswith('_origlink'):
            candidate = article[key]
            if candidate and candidate.startswith(('http://', 'https://')):
                return candidate
    ans = article.get('link', None)
    if not ans and getattr(article, 'links', None):
        # Fall back to the first rel="alternate" entry in the links list.
        for item in article.links:
            if item.get('rel', 'alternate') == 'alternate':
                ans = item['href']
                break
    return ans
def skip_ad_pages(self, soup):
    '''
    This method is called with the source of each downloaded :term:`HTML` file, before
    any of the cleanup attributes like remove_tags, keep_only_tags are
    applied. Note that preprocess_regexps will have already been applied.
    It is meant to allow the recipe to skip ad pages. If the soup represents
    an ad page, return the HTML of the real page. Otherwise return
    None.

    `soup`: A `BeautifulSoup <https://www.crummy.com/software/BeautifulSoup/bs4/doc/>`__
    instance containing the downloaded :term:`HTML`.
    '''
    # Default: nothing is an ad page.
    return None
def abort_article(self, msg=None):
    ''' Call this method inside any of the preprocess methods to abort the
    download for the current article. Useful to skip articles that contain
    inappropriate content, such as pure video articles.

    :param msg: Optional message recorded with the abort; defaults to a
        generic localized message.
    :raises AbortArticle: always.
    '''
    raise AbortArticle(msg or _('Article download aborted'))
def preprocess_raw_html(self, raw_html, url):
    '''
    This method is called with the source of each downloaded :term:`HTML` file, before
    it is parsed into an object tree. raw_html is a unicode string
    representing the raw HTML downloaded from the web. url is the URL from
    which the HTML was downloaded.

    Note that this method acts *before* preprocess_regexps.

    This method must return the processed raw_html as a unicode object.
    '''
    # Default: no-op.
    return raw_html
def preprocess_raw_html_(self, raw_html, url):
    '''
    Internal wrapper around :meth:`preprocess_raw_html` that additionally
    runs the readability-based auto cleanup when :attr:`auto_cleanup` is
    set. Cleanup failures are logged and the un-cleaned HTML is returned.

    Fix: the exception handler was a bare ``except:``, which also swallowed
    SystemExit and KeyboardInterrupt; narrowed to ``except Exception``.
    '''
    raw_html = self.preprocess_raw_html(raw_html, url)
    if self.auto_cleanup:
        try:
            raw_html = self.extract_readable_article(raw_html, url)
        except Exception:
            self.log.exception('Auto cleanup of URL: %r failed'%url)
    return raw_html
def preprocess_html(self, soup):
    '''
    This method is called with the source of each downloaded :term:`HTML` file, before
    it is parsed for links and images. It is called after the cleanup as
    specified by remove_tags etc.
    It can be used to do arbitrarily powerful pre-processing on the :term:`HTML`.
    It should return `soup` after processing it.

    `soup`: A `BeautifulSoup <https://www.crummy.com/software/BeautifulSoup/bs4/doc/>`__
    instance containing the downloaded :term:`HTML`.
    '''
    # Default: no-op.
    return soup
def postprocess_html(self, soup, first_fetch):
    '''
    This method is called with the source of each downloaded :term:`HTML` file, after
    it is parsed for links and images.
    It can be used to do arbitrarily powerful post-processing on the :term:`HTML`.
    It should return `soup` after processing it.

    :param soup: A `BeautifulSoup <https://www.crummy.com/software/BeautifulSoup/bs4/doc/>`__ instance containing the downloaded :term:`HTML`.
    :param first_fetch: True if this is the first page of an article.
    '''
    # Default: no-op.
    return soup
def cleanup(self):
    '''
    Called after all articles have been download. Use it to do any cleanup like
    logging out of subscription sites, etc.
    '''
    # Default: nothing to clean up.
    pass
def canonicalize_internal_url(self, url, is_link=True):
    '''
    Return a set of canonical representations of ``url``. The default
    implementation uses just the server hostname and path of the URL,
    ignoring any query parameters, fragments, etc. The canonical
    representations must be unique across all URLs for this news source. If
    they are not, then internal links may be resolved incorrectly.

    :param is_link: Is True if the URL is coming from an internal link in
                    an HTML file. False if the URL is the URL used to
                    download an article.
    '''
    try:
        parts = urlparse(url)
    except Exception:
        self.log.error('Failed to parse url: %r, ignoring' % url)
        return frozenset()
    host, path = parts.netloc, parts.path or ''
    # urlparse may hand back bytes when given bytes input.
    if isinstance(host, bytes):
        host = host.decode('utf-8', 'replace')
    if isinstance(path, bytes):
        path = path.decode('utf-8', 'replace')
    return frozenset({(host, path.rstrip('/'))})
def index_to_soup(self, url_or_raw, raw=False, as_tree=False, save_raw=None):
    '''
    Convenience method that takes an URL to the index page and returns
    a `BeautifulSoup <https://www.crummy.com/software/BeautifulSoup/bs4/doc>`__
    of it.

    `url_or_raw`: Either a URL or the downloaded index page as a string

    :param raw: If True, return the raw downloaded/decoded markup instead
        of parsing it.
    :param as_tree: If True, parse with html5_parser and return an lxml
        tree instead of a BeautifulSoup.
    :param save_raw: Optional path; if set, the cleaned markup is also
        written there (UTF-8) for debugging.
    '''
    # Only treat the input as a URL if it starts with a scheme (e.g. http://).
    if re.match((br'\w+://' if isinstance(url_or_raw, bytes) else r'\w+://'), url_or_raw):
        # We may be called in a thread (in the skip_ad_pages method), so
        # clone the browser to be safe. We cannot use self.cloned_browser
        # as it may or may not actually clone the browser, depending on if
        # the recipe implements get_browser() or not
        br = self.clone_browser(self.browser)
        open_func = getattr(br, 'open_novisit', br.open)
        with closing(open_func(url_or_raw, timeout=self.timeout)) as f:
            _raw = f.read()
        if not _raw:
            raise RuntimeError('Could not fetch index from %s'%url_or_raw)
    else:
        _raw = url_or_raw
    if raw:
        return _raw
    if not isinstance(_raw, str) and self.encoding:
        if callable(self.encoding):
            # encoding may be a callable that performs the decode itself.
            _raw = self.encoding(_raw)
        else:
            _raw = _raw.decode(self.encoding, 'replace')
    from calibre.ebooks.chardet import strip_encoding_declarations, xml_to_unicode
    from calibre.utils.cleantext import clean_xml_chars
    if isinstance(_raw, str):
        _raw = strip_encoding_declarations(_raw)
    else:
        # Still bytes: autodetect encoding and strip encoding declarations.
        _raw = xml_to_unicode(_raw, strip_encoding_pats=True, resolve_entities=True)[0]
    _raw = clean_xml_chars(_raw)
    if save_raw:
        with open(save_raw, 'wb') as f:
            f.write(_raw.encode('utf-8'))
    if as_tree:
        from html5_parser import parse
        return parse(_raw)
    return BeautifulSoup(_raw)
def extract_readable_article(self, html, url):
    '''
    Extract the main article content from ``html``, clean it up and return
    it as a unicode HTML string.

    Based on the original readability algorithm by Arc90.

    Fixes: the docstring previously claimed a (article_html, extracted_title)
    tuple was returned, but the code returns only the HTML string; and the
    bare ``except:`` around fragment parsing (which also swallowed
    SystemExit/KeyboardInterrupt) is narrowed to ``except Exception``.
    '''
    from lxml.html import document_fromstring, fragment_fromstring, tostring

    from calibre.ebooks.readability import readability
    doc = readability.Document(html, self.log, url=url,
            keep_elements=self.auto_cleanup_keep)
    article_html = doc.summary()
    extracted_title = doc.title()

    try:
        frag = fragment_fromstring(article_html)
    except Exception:
        # Not a single well-formed fragment: parse as a full document and
        # use its last <body> instead.
        doc = document_fromstring(article_html)
        frag = doc.xpath('//body')[-1]

    # Wrap the extracted fragment in a full document carrying the title.
    if frag.tag == 'html':
        root = frag
    elif frag.tag == 'body':
        root = document_fromstring(
            '<html><head><title>%s</title></head></html>' %
            extracted_title)
        root.append(frag)
    else:
        root = document_fromstring(
            '<html><head><title>%s</title></head><body/></html>' %
            extracted_title)
        root.xpath('//body')[0].append(frag)

    body = root.xpath('//body')[0]
    has_title = False
    for x in body.iterdescendants():
        if x.text == extracted_title:
            has_title = True
    inline_titles = body.xpath('//h1|//h2')
    if not has_title and not inline_titles:
        # No visible title anywhere: inject the extracted title as a heading.
        heading = body.makeelement('h2')
        heading.text = extracted_title
        body.insert(0, heading)

    raw_html = tostring(root, encoding='unicode')

    return raw_html
def sort_index_by(self, index, weights):
    '''
    Convenience method to sort the titles in `index` according to `weights`.
    `index` is sorted in place. Returns `index`.

    `index`: A list of titles.

    `weights`: A dictionary that maps weights to titles. If any titles
    in index are not in weights, they are assumed to have a weight of 0.
    '''
    # defaultdict gives missing titles an implicit weight of 0.
    weight_of = defaultdict(int, weights)
    index.sort(key=weight_of.__getitem__)
    return index
def parse_index(self):
    '''
    This method should be implemented in recipes that parse a website
    instead of feeds to generate a list of articles. Typical uses are for
    news sources that have a "Print Edition" webpage that lists all the
    articles in the current print edition. If this function is implemented,
    it will be used in preference to :meth:`BasicNewsRecipe.parse_feeds`.

    It must return a list. Each element of the list must be a 2-element tuple
    of the form ``('feed title', list of articles)``.

    Each list of articles must contain dictionaries of the form::

        {
        'title'       : article title,
        'url'         : URL of print version,
        'date'        : The publication date of the article as a string,
        'description' : A summary of the article
        'content'     : The full article (can be an empty string). Obsolete
                        do not use, instead save the content to a temporary
                        file and pass a file:///path/to/temp/file.html as
                        the URL.
        }

    For an example, see the recipe for downloading `The Atlantic`.
    In addition, you can add 'author' for the author of the article.

    If you want to abort processing for some reason and have
    calibre show the user a simple message instead of an error, call
    :meth:`abort_recipe_processing`.

    :raises NotImplementedError: by default, so that feeds are used instead.
    '''
    raise NotImplementedError()
def abort_recipe_processing(self, msg):
    '''
    Causes the recipe download system to abort the download of this recipe,
    displaying a simple feedback message to the user.
    '''
    from calibre.ebooks.conversion import ConversionUserFeedBack
    title_msg = _('Failed to download %s') % self.title
    raise ConversionUserFeedBack(title_msg, msg)
def get_obfuscated_article(self, url):
    '''
    If you set `articles_are_obfuscated` this method is called with
    every article URL. It should return the path to a file on the filesystem
    that contains the article HTML. That file is processed by the recursive
    HTML fetching engine, so it can contain links to pages/images on the web.
    Alternately, you can return a dictionary of the form:
    {'data': <HTML data>, 'url': <the resolved URL of the article>}. This avoids
    needing to create temporary files. The `url` key in the dictionary is useful if
    the effective URL of the article is different from the URL passed into this method,
    for example, because of redirects. It can be omitted if the URL is unchanged.

    This method is typically useful for sites that try to make it difficult to
    access article content automatically.
    '''
    # Base implementation: must be overridden when
    # articles_are_obfuscated is True (see fetch_obfuscated_article).
    raise NotImplementedError()
def add_toc_thumbnail(self, article, src):
    '''
    Call this from populate_article_metadata with the src attribute of an
    <img> tag from the article that is appropriate for use as the thumbnail
    representing the article in the Table of Contents. Whether the
    thumbnail is actually used is device dependent (currently only used by
    the Kindles). Note that the referenced image must be one that was
    successfully downloaded, otherwise it will be ignored.
    '''
    if not (src and hasattr(article, 'toc_thumbnail')):
        return
    normalized = src.replace('\\', '/')
    # Only images that the fetcher actually saved under
    # feed_N/article_N/images/ qualify as thumbnails.
    if re.search(r'feed_\d+/article_\d+/images/img', normalized, flags=re.I):
        article.toc_thumbnail = re.sub(r'^.*?feed', 'feed',
                                       normalized, flags=re.IGNORECASE)
    else:
        self.log.warn('Ignoring invalid TOC thumbnail image: %r'%normalized)
def populate_article_metadata(self, article, soup, first):
    '''
    Called when each HTML page belonging to article is downloaded.
    Intended to be used to get article metadata like author/summary/etc.
    from the parsed HTML (soup).

    :param article: A object of class :class:`calibre.web.feeds.Article`.
        If you change the summary, remember to also change the text_summary
    :param soup: Parsed HTML belonging to this article
    :param first: True iff the parsed HTML is the first page of the article.
    '''
    # Hook for subclasses; default does nothing.
    pass
def postprocess_book(self, oeb, opts, log):
    '''
    Run any needed post processing on the parsed downloaded e-book.

    :param oeb: An OEBBook object
    :param opts: Conversion options
    '''
    # Hook for subclasses; default does nothing.
    pass
def __init__(self, options, log, progress_reporter):
    '''
    Initialize the recipe.
    :param options: Parsed commandline options
    :param log: Logging object
    :param progress_reporter: A Callable that takes two arguments: progress (a number between 0 and 1) and a string message. The message should be optional.
    '''
    self.log = ThreadSafeWrapper(log)
    if not isinstance(self.title, str):
        self.title = str(self.title, 'utf-8', 'replace')

    self.debug = options.verbose > 1
    self.output_dir = os.path.abspath(os.getcwd())
    self.verbose = options.verbose
    self.test = options.test
    # options.test may be a truthy non-tuple; normalize to
    # (num_feeds, num_articles_per_feed)
    if self.test and not isinstance(self.test, tuple):
        self.test = (2, 2)
    self.username = options.username
    self.password = options.password
    self.lrf = options.lrf
    self.output_profile = options.output_profile
    self.touchscreen = getattr(self.output_profile, 'touchscreen', False)
    if self.touchscreen:
        self.template_css += self.output_profile.touchscreen_news_css

    if self.test:
        # Test mode: limit article count and parallelism
        self.max_articles_per_feed = self.test[1]
        self.simultaneous_downloads = min(4, self.simultaneous_downloads)

    if self.debug:
        self.verbose = True
    self.report_progress = progress_reporter

    if self.needs_subscription and (
            self.username is None or self.password is None or (
                not self.username and not self.password)):
        if self.needs_subscription != 'optional':
            raise ValueError(_('The "%s" recipe needs a username and password.')%self.title)

    self.browser = self.get_browser()
    self.image_map, self.image_counter = {}, 1
    self.css_map = {}

    # Build a web2disk command line from recipe attributes; the parsed
    # options object drives the recursive fetcher.
    web2disk_cmdline = ['web2disk',
        '--timeout', str(self.timeout),
        '--max-recursions', str(self.recursions),
        '--delay', str(self.delay),
        ]

    if self.verbose:
        web2disk_cmdline.append('--verbose')

    if self.no_stylesheets:
        web2disk_cmdline.append('--dont-download-stylesheets')

    for reg in self.match_regexps:
        web2disk_cmdline.extend(['--match-regexp', reg])

    for reg in self.filter_regexps:
        web2disk_cmdline.extend(['--filter-regexp', reg])

    if options.output_profile.short_name in ('default', 'tablet'):
        self.scale_news_images_to_device = False
    elif self.scale_news_images_to_device:
        self.scale_news_images = options.output_profile.screen_size

    self.web2disk_options = web2disk_option_parser().parse_args(web2disk_cmdline)[0]
    # Mirror per-recipe hooks/settings onto the fetcher options object
    for extra in ('keep_only_tags', 'remove_tags', 'preprocess_regexps',
                  'skip_ad_pages', 'preprocess_html', 'remove_tags_after',
                  'remove_tags_before', 'is_link_wanted',
                  'compress_news_images', 'compress_news_images_max_size',
                  'compress_news_images_auto_size', 'scale_news_images'):
        setattr(self.web2disk_options, extra, getattr(self, extra))
    self.web2disk_options.postprocess_html = self._postprocess_html
    self.web2disk_options.preprocess_image = self.preprocess_image
    self.web2disk_options.encoding = self.encoding
    self.web2disk_options.preprocess_raw_html = self.preprocess_raw_html_
    self.web2disk_options.get_delay = self.get_url_specific_delay

    # A per-request delay only makes sense with a single download thread
    if self.delay > 0:
        self.simultaneous_downloads = 1

    self.navbar = templates.TouchscreenNavBarTemplate() if self.touchscreen else \
        templates.NavBarTemplate()
    self.failed_downloads = []
    self.partial_failures = []
    self.aborted_articles = []
    # recipe_specific_options starts as metadata (declared options) and is
    # replaced by a dict of effective values, seeded from declared defaults
    self.recipe_specific_options_metadata = rso = self.recipe_specific_options or {}
    self.recipe_specific_options = {k: rso[k]['default'] for k in rso if 'default' in rso[k]}
    for x in (options.recipe_specific_option or ()):
        # Command line values come as 'key:value' strings
        k, sep, v = x.partition(':')
        if not sep:
            raise ValueError(f'{x} is not a valid recipe specific option')
        if k not in rso:
            raise KeyError(f'{k} is not an option supported by: {self.title}')
        self.recipe_specific_options[k] = v
    if self.recipe_specific_options:
        log('Recipe specific options:')
        for k, v in self.recipe_specific_options.items():
            log(' ', f'{k} = {v}')
def _postprocess_html(self, soup, first_fetch, job_info):
    '''
    Internal post-processing applied to every downloaded page: strips
    stylesheets/scripts/unsafe tags, injects the recipe CSS and (on the
    first page of an article) the navigation bar, then delegates to the
    recipe's public postprocess_html().

    :param soup: Parsed page (BeautifulSoup)
    :param first_fetch: True iff this is the first page of an article
    :param job_info: (url, feed_number, article_number, number_of_feeds)
        or None/falsy when not fetching as part of a feed job
    '''
    if self.no_stylesheets:
        # Remove both linked and inline CSS before injecting our own
        for link in soup.findAll('link'):
            if (link.get('type') or 'text/css').lower() == 'text/css' and 'stylesheet' in (link.get('rel') or ('stylesheet',)):
                link.extract()
        for style in soup.findAll('style'):
            style.extract()
    # Fall back to body, then any tag at all, as the CSS insertion point
    head = soup.find('head')
    if not head:
        head = soup.find('body')
    if not head:
        head = soup.find(True)
    css = self.template_css + '\n\n' + (self.get_extra_css() or '')
    style = soup.new_tag('style', type='text/css', title='override_css')
    style.append(css)
    head.append(style)
    if first_fetch and job_info:
        url, f, a, feed_len = job_info
        body = soup.find('body')
        if body is not None:
            templ = self.navbar.generate(False, f, a, feed_len,
                                         not self.has_single_feed,
                                         url, __appname__,
                                         center=self.center_navbar,
                                         extra_css=self.get_extra_css() or '')
            elem = BeautifulSoup(templ.render(doctype='xhtml').decode('utf-8')).find('div')
            body.insert(0, elem)
            # This is needed because otherwise inserting elements into
            # the soup breaks find()
            soup = BeautifulSoup(soup.decode_contents())
    if self.remove_javascript:
        for script in list(soup.findAll('script')):
            script.extract()
        for o in soup.findAll(onload=True):
            del o['onload']

    for attr in self.remove_attributes:
        for x in soup.findAll(attrs={attr:True}):
            del x[attr]
    for bad_tag in list(soup.findAll(['base', 'iframe', 'canvas', 'embed', 'button',
        'command', 'datalist', 'video', 'audio', 'noscript', 'link', 'meta'])):
        # link tags can be used for preloading causing network activity in
        # calibre viewer. meta tags can do all sorts of crazy things,
        # including http-equiv refresh, viewport shenanigans, etc.
        bad_tag.extract()
    # srcset causes some viewers, like calibre's to load images from the
    # web, and it also possible causes iBooks on iOS to barf, see
    # https://bugs.launchpad.net/bugs/1713986
    for img in soup.findAll('img', srcset=True):
        del img['srcset']
    ans = self.postprocess_html(soup, first_fetch)

    # Nuke HTML5 tags
    for x in ans.findAll(['article', 'aside', 'header', 'footer', 'nav',
        'figcaption', 'figure', 'section']):
        x.get_attribute_list('class').append(f'calibre-nuked-tag-{x.name}')
        x.name = 'div'

    if job_info:
        url, f, a, feed_len = job_info
        try:
            article = self.feed_objects[f].articles[a]
        except:
            self.log.exception('Failed to get article object for postprocessing')
            pass
        else:
            self.populate_article_metadata(article, ans, first_fetch)
    return ans
def download(self):
    '''
    Download and pre-process all articles from the feeds in this recipe.
    This method should be called only once on a particular Recipe instance.
    Calling it more than once will lead to undefined behavior.
    :return: Path to index.html
    '''
    try:
        res = self.build_index()
        self.report_progress(1, _('Download finished'))
        # Summarize complete failures (whole article missing)
        if self.failed_downloads:
            self.log.warning(_('Failed to download the following articles:'))
            for feed, article, debug in self.failed_downloads:
                self.log.warning(article.title, 'from', feed.title)
                self.log.debug(article.url)
                self.log.debug(debug)
        # Summarize partial failures (some links within an article failed)
        if self.partial_failures:
            self.log.warning(_('Failed to download parts of the following articles:'))
            for feed, atitle, aurl, debug in self.partial_failures:
                self.log.warning(atitle + _(' from ') + feed)
                self.log.debug(aurl)
                self.log.warning(_('\tFailed links:'))
                for l, tb in debug:
                    self.log.warning(l)
                    self.log.debug(tb)
        return res
    finally:
        # Always run recipe cleanup (e.g. logout), even on failure
        self.cleanup()
@property
def lang_for_html(self):
    '''Primary language subtag for generated HTML pages, or None when the
    recipe language is unset/undetermined ('und').'''
    try:
        code = self.language.replace('_', '-').partition('-')[0].lower()
    except:
        return None
    return None if code == 'und' else code
def feeds2index(self, feeds):
    '''Render the top-level index page (index.html) listing every feed.'''
    cls = (templates.TouchscreenIndexTemplate if self.touchscreen
           else templates.IndexTemplate)
    tmpl = cls(lang=self.lang_for_html)
    css = self.template_css + '\n\n' + (self.get_extra_css() or '')
    generated = tmpl.generate(self.title, "mastheadImage.jpg", self.timefmt,
                              feeds, extra_css=css)
    return generated.render(doctype='xhtml')
@classmethod
def description_limiter(cls, src):
    '''Truncate the description *src* near cls.summary_length characters,
    preferring to cut just after a ';' or '>' found within 50 characters of
    the limit; appends an ellipsis when truncation occurred.'''
    if not src:
        return ''
    src = force_unicode(src, 'utf-8')
    limit = cls.summary_length
    fuzz = 50
    # Best cut point: the latest of ';'/'>' occurring soon after the limit
    cut = -1
    for marker in (';', '>'):
        idx = src.find(marker, limit)
        if idx > 0 and idx - limit <= fuzz:
            cut = max(cut, idx)
    if cut < 0:
        cut = limit
    ans = src[:cut + 1]
    if len(ans) < len(src):
        from calibre.utils.cleantext import clean_xml_chars
        # Truncating the string could cause a dangling UTF-16 half-surrogate, which will cause lxml to barf, clean it
        ans = clean_xml_chars(ans) + '\u2026'
    return ans
def feed2index(self, f, feeds):
    '''
    Render the index page for feed number *f*, downloading and caching the
    feed image (if any) into output_dir/images first.

    Bug fix: the image cache (self.image_map) was previously keyed on
    feed.image_url AFTER it had been rewritten to the local path, i.e. it
    mapped the local path to itself and never prevented re-downloading the
    same image URL for another feed. The original URL is now recorded as
    the cache key before feed.image_url is rewritten.
    '''
    feed = feeds[f]
    if feed.image_url is not None:  # Download feed image
        imgdir = os.path.join(self.output_dir, 'images')
        if not os.path.isdir(imgdir):
            os.makedirs(imgdir)

        if feed.image_url in self.image_map:
            feed.image_url = self.image_map[feed.image_url]
        else:
            bn = urlsplit(feed.image_url).path
            if bn:
                bn = bn.rpartition('/')[-1]
                if bn:
                    img = os.path.join(imgdir, 'feed_image_%d%s'%(self.image_counter, os.path.splitext(bn)[-1]))
                    try:
                        with open(img, 'wb') as fi, closing(self.browser.open(feed.image_url, timeout=self.timeout)) as r:
                            fi.write(r.read())
                        self.image_counter += 1
                        # Key the cache on the original URL, then point the
                        # feed at the local copy
                        self.image_map[feed.image_url] = img
                        feed.image_url = img
                    except:
                        # Best effort: a missing feed image is not fatal
                        pass
        if isinstance(feed.image_url, bytes):
            feed.image_url = feed.image_url.decode(sys.getfilesystemencoding(), 'strict')

    templ = (templates.TouchscreenFeedTemplate if self.touchscreen else
                templates.FeedTemplate)
    templ = templ(lang=self.lang_for_html)
    css = self.template_css + '\n\n' +(self.get_extra_css() or '')

    return templ.generate(f, feeds, self.description_limiter,
                            extra_css=css).render(doctype='xhtml')
def _fetch_article(self, url, dir_, f, a, num_of_feeds, preloaded=None):
    '''
    Fetch a single article (and its linked resources) into *dir_* using the
    recursive fetcher.

    :param url: Article URL
    :param dir_: Directory to download into
    :param f: Feed index, :param a: article index within the feed
    :param num_of_feeds: Total number of feeds (used by the navbar)
    :param preloaded: Optional pre-fetched raw HTML for *url*
    :return: (path to downloaded index, list of downloaded paths, failed links)

    Fix: removed the dead ``br = self.browser`` assignment — the variable
    was unconditionally reassigned in both branches below.
    '''
    if hasattr(self.get_browser, 'is_base_class_implementation'):
        # We are using the default get_browser, which means no need to
        # clone
        br = BasicNewsRecipe.get_browser(self)
    else:
        br = self.clone_browser(self.browser)
    self.web2disk_options.browser = br
    fetcher = RecursiveFetcher(self.web2disk_options, self.log,
            self.image_map, self.css_map,
            (url, f, a, num_of_feeds))
    fetcher.browser = br
    fetcher.base_dir = dir_
    fetcher.current_dir = dir_
    fetcher.show_progress = False
    fetcher.image_url_processor = self.image_url_processor
    if preloaded is not None:
        fetcher.preloaded_urls[url] = preloaded
    res, path, failures = fetcher.start_fetch(url), fetcher.downloaded_paths, fetcher.failed_links
    if not res or not os.path.exists(res):
        msg = _('Could not fetch article.') + ' '
        if self.debug:
            msg += _('The debug traceback is available earlier in this log')
        else:
            msg += _('Run with -vv to see the reason')
        raise Exception(msg)

    return res, path, failures
def fetch_article(self, url, dir, f, a, num_of_feeds):
    # Fetch a normal (non-obfuscated, non-embedded) article; thin wrapper
    # over _fetch_article with no preloaded HTML.
    return self._fetch_article(url, dir, f, a, num_of_feeds)
def fetch_obfuscated_article(self, url, dir, f, a, num_of_feeds):
    '''Fetch an article whose HTML was obtained via get_obfuscated_article():
    either a {'data': ..., 'url': ...} dict or a path to a temp file.'''
    obf = self.get_obfuscated_article(url)
    if not isinstance(obf, dict):
        # A filesystem path to a temporary file holding the article HTML
        with open(obf, 'rb') as src:
            data = src.read()
        os.remove(obf)
    else:
        data = obf['data']
        if isinstance(data, str):
            data = data.encode(self.encoding or 'utf-8')
        # The dict may carry the resolved (post-redirect) URL
        url = obf.get('url', url)
    return self._fetch_article(url, dir, f, a, num_of_feeds, preloaded=data)
def fetch_embedded_article(self, article, dir, f, a, num_of_feeds):
    # Render feed-embedded content to a temporary HTML file and run it
    # through the normal fetch pipeline so linked images are downloaded.
    templ = templates.EmbeddedContent()
    raw = templ.generate(article).render('html')
    with PersistentTemporaryFile('_feeds2disk.html') as pt:
        pt.write(raw)
        # file: URL scheme differs between Windows and POSIX
        url = ('file:'+pt.name) if iswindows else ('file://'+pt.name)
    return self._fetch_article(url, dir, f, a, num_of_feeds)
def remove_duplicate_articles(self, feeds):
    '''Remove articles already seen (per the attributes named in
    self.ignore_duplicate_articles) across all feeds; optionally drop
    feeds that end up empty. Returns the (possibly filtered) feed list.'''
    seen_keys = defaultdict(set)
    to_remove = []
    for feed in feeds:
        for article in feed:
            for key in self.ignore_duplicate_articles:
                val = getattr(article, key)
                if not val:
                    continue
                if val in seen_keys[key]:
                    to_remove.append((feed, article))
                else:
                    seen_keys[key].add(val)

    for feed, article in to_remove:
        self.log.debug('Removing duplicate article: %s from section: %s'%(
            article.title, feed.title))
        feed.remove_article(article)

    if self.remove_empty_feeds:
        feeds = [feed for feed in feeds if len(feed) > 0]
    return feeds
def build_index(self):
    '''
    Drive the whole download: resolve feeds (parse_index or parse_feeds),
    download cover/masthead, write the top-level index, queue one fetch
    job per article on a thread pool, then write per-feed index pages and
    the OPF/NCX. Returns the path to the generated index.html.
    '''
    self.report_progress(0, _('Fetching feeds...'))
    feeds = None
    try:
        # parse_index based recipes take precedence over RSS feeds
        feeds = feeds_from_index(self.parse_index(), oldest_article=self.oldest_article,
                                 max_articles_per_feed=self.max_articles_per_feed,
                                 log=self.log)
        self.report_progress(0, _('Got feeds from index page'))
    except NotImplementedError:
        pass

    if feeds is None:
        feeds = self.parse_feeds()

    if not feeds:
        raise ValueError('No articles found, aborting')

    if self.ignore_duplicate_articles is not None:
        feeds = self.remove_duplicate_articles(feeds)

    self.report_progress(0, _('Trying to download cover...'))
    self.download_cover()
    self.report_progress(0, _('Generating masthead...'))
    self.resolve_masthead()

    if self.test:
        # Test mode: only the first test[0] feeds
        feeds = feeds[:self.test[0]]
    self.has_single_feed = len(feeds) == 1

    index = os.path.join(self.output_dir, 'index.html')

    html = self.feeds2index(feeds)
    with open(index, 'wb') as fi:
        fi.write(html)

    self.jobs = []

    if self.reverse_article_order:
        for feed in feeds:
            if hasattr(feed, 'reverse'):
                feed.reverse()

    self.feed_objects = feeds
    for f, feed in enumerate(feeds):
        feed_dir = os.path.join(self.output_dir, 'feed_%d'%f)
        if not os.path.isdir(feed_dir):
            os.makedirs(feed_dir)

        for a, article in enumerate(feed):
            if a >= self.max_articles_per_feed:
                break
            art_dir = os.path.join(feed_dir, 'article_%d'%a)
            if not os.path.isdir(art_dir):
                os.makedirs(art_dir)
            try:
                url = self.print_version(article.url)
            except NotImplementedError:
                url = article.url
            except:
                self.log.exception('Failed to find print version for: '+article.url)
                url = None
            if not url:
                continue
            # Pick the fetch strategy: embedded content, obfuscated, or plain
            func, arg = (self.fetch_embedded_article, article) \
                if self.use_embedded_content or (self.use_embedded_content is None and feed.has_embedded_content()) \
                else \
                ((self.fetch_obfuscated_article if self.articles_are_obfuscated
                  else self.fetch_article), url)
            req = WorkRequest(func, (arg, art_dir, f, a, len(feed)),
                              {}, (f, a), self.article_downloaded,
                              self.error_in_article_download)
            req.feed = feed
            req.article = article
            req.feed_dir = feed_dir
            self.jobs.append(req)

    self.jobs_done = 0
    tp = ThreadPool(self.simultaneous_downloads)
    for req in self.jobs:
        tp.putRequest(req, block=True, timeout=0)

    self.report_progress(0, ngettext(
        'Starting download in a single thread...',
        'Starting download [{} threads]...', self.simultaneous_downloads).format(self.simultaneous_downloads))

    # Poll until every queued article job has completed
    while True:
        try:
            tp.poll()
            time.sleep(0.1)
        except NoResultsPending:
            break

    # Per-feed index pages are written only after all articles are in
    for f, feed in enumerate(feeds):
        html = self.feed2index(f,feeds)
        feed_dir = os.path.join(self.output_dir, 'feed_%d'%f)
        with open(os.path.join(feed_dir, 'index.html'), 'wb') as fi:
            fi.write(html)
    self.create_opf(feeds)
    self.report_progress(1, _('Feeds downloaded to %s')%index)

    return index
def _download_cover(self):
    '''
    Obtain the cover via get_cover_url() — which may return a URL, a local
    path, or a file-like object — convert a PDF cover to an image if
    needed, apply optional margins and save to output_dir/cover.jpg.
    Sets self.cover_path on success, leaves it None otherwise.
    '''
    self.cover_path = None
    try:
        cu = self.get_cover_url()
    except Exception as err:
        self.log.error(_('Could not download cover: %s')%as_unicode(err))
        self.log.debug(traceback.format_exc())
    else:
        if not cu:
            return
        cdata = None
        if hasattr(cu, 'read'):
            # File-like object returned by the recipe
            cdata = cu.read()
            cu = getattr(cu, 'name', 'cover.jpg')
        elif os.access(cu, os.R_OK):
            # Local file path
            with open(cu, 'rb') as f:
                cdata = f.read()
        else:
            # Remote URL
            self.report_progress(1, _('Downloading cover from %s')%cu)
            with closing(self.browser.open(cu, timeout=self.timeout)) as r:
                cdata = r.read()
        if not cdata:
            return
        ext = cu.split('/')[-1].rpartition('.')[-1].lower().strip()
        if ext == 'pdf':
            # Extract the embedded cover image from a PDF cover
            from calibre.ebooks.metadata.pdf import get_metadata
            stream = io.BytesIO(cdata)
            cdata = None
            mi = get_metadata(stream)
            if mi.cover_data and mi.cover_data[1]:
                cdata = mi.cover_data[1]
        if not cdata:
            return
        if self.cover_margins[0] or self.cover_margins[1]:
            cdata = image_to_data(add_borders_to_image(cdata,
                        left=self.cover_margins[0],right=self.cover_margins[0],
                        top=self.cover_margins[1],bottom=self.cover_margins[1],
                        border_color=self.cover_margins[2]))

        cpath = os.path.join(self.output_dir, 'cover.jpg')
        save_cover_data_to(cdata, cpath)
        self.cover_path = cpath
def download_cover(self):
    '''Best-effort cover download: delegates to _download_cover() and never
    raises; on any failure self.cover_path remains None.'''
    self.cover_path = None
    try:
        self._download_cover()
    except:
        self.cover_path = None
        self.log.exception('Failed to download cover')
def _download_masthead(self, mu):
if hasattr(mu, 'rpartition'):
ext = mu.rpartition('.')[-1]
if '?' in ext:
ext = ''
else:
ext = mu.name.rpartition('.')[-1]
ext = ext.lower() if ext else 'jpg'
mpath = os.path.join(self.output_dir, 'masthead_source.'+ext)
outfile = os.path.join(self.output_dir, 'mastheadImage.jpg')
if hasattr(mu, 'read'):
with open(mpath, 'wb') as mfile:
mfile.write(mu.read())
elif os.access(mu, os.R_OK):
with open(mpath, 'wb') as mfile:
mfile.write(open(mu, 'rb').read())
else:
with open(mpath, 'wb') as mfile, closing(self.browser.open(mu, timeout=self.timeout)) as r:
mfile.write(r.read())
self.report_progress(1, _('Masthead image downloaded'))
self.prepare_masthead_image(mpath, outfile)
self.masthead_path = outfile
if os.path.exists(mpath):
os.remove(mpath)
def download_masthead(self, url):
    '''Best-effort wrapper around _download_masthead(); failures are
    logged, never propagated (self.masthead_path is then left unset/None).'''
    try:
        self._download_masthead(url)
    except:
        self.log.exception("Failed to download supplied masthead_url")
def resolve_masthead(self):
    '''Determine the masthead image: try the recipe-supplied URL first,
    then fall back to synthesizing a default one. On total failure
    self.masthead_path is None.'''
    self.masthead_path = None
    try:
        masthead_url = self.get_masthead_url()
    except:
        self.log.exception('Failed to get masthead url')
        masthead_url = None

    if masthead_url is not None:
        # Try downloading the user-supplied masthead_url
        # Failure sets self.masthead_path to None
        self.download_masthead(masthead_url)
    if self.masthead_path is not None:
        return
    self.log.info("Synthesizing mastheadImage")
    self.masthead_path = os.path.join(self.output_dir, 'mastheadImage.jpg')
    try:
        self.default_masthead_image(self.masthead_path)
    except:
        self.log.exception('Failed to generate default masthead image')
        self.masthead_path = None
def default_cover(self, cover_file):
    '''
    Create a generic cover for recipes that don't have a cover.
    Writes the image data to *cover_file*; returns True on success,
    False on any failure (which is logged, not raised).
    '''
    try:
        from calibre.ebooks.covers import create_cover
        if isinstance(self.title, str):
            title = self.title
        else:
            title = self.title.decode(preferred_encoding, 'replace')
        date = strftime(self.timefmt).replace('[', '').replace(']', '')
        cover_file.write(create_cover(title, [date]))
        cover_file.flush()
        return True
    except:
        self.log.exception('Failed to generate default cover')
        return False
def get_masthead_title(self):
    '''Title rendered into the synthesized masthead image.

    Override in subclass to use something other than the recipe title.
    '''
    return self.title
# Dimensions, in pixels, of the (synthesized or rescaled) masthead image.
MI_WIDTH = 600
MI_HEIGHT = 60
def default_masthead_image(self, out_path):
    # Synthesize a simple text masthead from the recipe title at the
    # canonical MI_WIDTH x MI_HEIGHT size.
    from calibre.ebooks import generate_masthead
    generate_masthead(self.get_masthead_title(), output_path=out_path,
            width=self.MI_WIDTH, height=self.MI_HEIGHT)
def prepare_masthead_image(self, path_to_image, out_path):
    # Rescale/pad the downloaded masthead image to MI_WIDTH x MI_HEIGHT
    # via the module-level helper of the same name.
    prepare_masthead_image(path_to_image, out_path, self.MI_WIDTH, self.MI_HEIGHT)
def publication_date(self):
    '''
    Use this method to set the date when this issue was published.
    Defaults to the moment of download. Must return a :class:`datetime.datetime`
    object.
    '''
    return nowf()
def create_opf(self, feeds, dir=None):
    '''
    Build the OPF metadata/manifest/spine and NCX table of contents for
    the downloaded issue, writing index.opf and index.ncx into *dir*
    (defaults to self.output_dir). Also injects the bottom navbar into
    each downloaded article page.
    '''
    if dir is None:
        dir = self.output_dir
    title = self.short_title()
    pdate = self.publication_date()
    if self.output_profile.periodical_date_in_title:
        title += strftime(self.timefmt, pdate)
    mi = MetaInformation(title, [__appname__])
    mi.publisher = __appname__
    mi.author_sort = __appname__
    if self.publication_type:
        mi.publication_type = 'periodical:'+self.publication_type+':'+self.short_title()
    mi.timestamp = nowf()
    # Collect successfully downloaded article titles for the comments
    # field, skipping aborted and failed articles
    article_titles, aseen = [], set()
    for (af, aa) in self.aborted_articles:
        aseen.add(aa.title)
    for (ff, fa, tb) in self.failed_downloads:
        aseen.add(fa.title)
    for f in feeds:
        for a in f:
            if a.title and a.title not in aseen:
                aseen.add(a.title)
                article_titles.append(force_unicode(a.title, 'utf-8'))

    desc = self.description
    if not isinstance(desc, str):
        desc = desc.decode('utf-8', 'replace')
    mi.comments = (_('Articles in this issue:'
        ) + '\n\n' + '\n\n'.join(article_titles)) + '\n\n' + desc

    language = canonicalize_lang(self.language)
    if language is not None:
        mi.language = language
    mi.pubdate = pdate

    opf_path = os.path.join(dir, 'index.opf')
    ncx_path = os.path.join(dir, 'index.ncx')

    opf = OPFCreator(dir, mi)
    # Add mastheadImage entry to <guide> section
    mp = getattr(self, 'masthead_path', None)
    if mp is not None and os.access(mp, os.R_OK):
        from calibre.ebooks.metadata.opf2 import Guide
        ref = Guide.Reference(os.path.basename(self.masthead_path), os.getcwd())
        ref.type = 'masthead'
        ref.title = 'Masthead Image'
        opf.guide.append(ref)

    manifest = [os.path.join(dir, 'feed_%d'%i) for i in range(len(feeds))]
    manifest.append(os.path.join(dir, 'index.html'))
    manifest.append(os.path.join(dir, 'index.ncx'))

    # Get cover
    cpath = getattr(self, 'cover_path', None)
    if cpath is None:
        # No downloaded cover: synthesize a default one
        pf = open(os.path.join(dir, 'cover.jpg'), 'wb')
        if self.default_cover(pf):
            cpath = pf.name
    if cpath is not None and os.access(cpath, os.R_OK):
        opf.cover = cpath
        manifest.append(cpath)

    # Get masthead
    mpath = getattr(self, 'masthead_path', None)
    if mpath is not None and os.access(mpath, os.R_OK):
        manifest.append(mpath)

    opf.create_manifest_from_files_in(manifest)
    # Fixed ids so downstream (e.g. periodical handling) can find them
    for mani in opf.manifest:
        if mani.path.endswith('.ncx'):
            mani.id = 'ncx'
        if mani.path.endswith('mastheadImage.jpg'):
            mani.id = 'masthead-image'

    entries = ['index.html']
    toc = TOC(base_path=dir)
    self.play_order_counter = 0
    self.play_order_map = {}

    self.article_url_map = aumap = defaultdict(set)

    def feed_index(num, parent):
        # Add TOC entries and spine entries for every downloaded article
        # of feed *num* under the TOC node *parent*; also appends the
        # navbar to the end of each article's HTML.
        f = feeds[num]
        for j, a in enumerate(f):
            if getattr(a, 'downloaded', False):
                adir = 'feed_%d/article_%d/'%(num, j)
                auth = a.author
                if not auth:
                    auth = None
                desc = a.text_summary
                if not desc:
                    desc = None
                else:
                    desc = self.description_limiter(desc)
                tt = a.toc_thumbnail if a.toc_thumbnail else None
                entries.append('%sindex.html'%adir)
                po = self.play_order_map.get(entries[-1], None)
                if po is None:
                    self.play_order_counter += 1
                    po = self.play_order_counter
                arelpath = '%sindex.html'%adir
                # Remember every canonical URL that maps to this article,
                # used later to resolve internal links
                for curl in self.canonicalize_internal_url(a.orig_url, is_link=False):
                    aumap[curl].add(arelpath)
                article_toc_entry = parent.add_item(arelpath, None,
                        a.title if a.title else _('Untitled article'),
                        play_order=po, author=auth,
                        description=desc, toc_thumbnail=tt)
                for entry in a.internal_toc_entries:
                    anchor = entry.get('anchor')
                    if anchor:
                        self.play_order_counter += 1
                        po += 1
                        article_toc_entry.add_item(
                            arelpath, entry['anchor'], entry['title'] or _('Unknown section'),
                            play_order=po
                        )
                last = os.path.join(self.output_dir, ('%sindex.html'%adir).replace('/', os.sep))
                for sp in a.sub_pages:
                    prefix = os.path.commonprefix([opf_path, sp])
                    relp = sp[len(prefix):]
                    entries.append(relp.replace(os.sep, '/'))
                    last = sp

                if os.path.exists(last):
                    # Append the bottom navbar to the last page of the article
                    with open(last, 'rb') as fi:
                        src = fi.read().decode('utf-8')
                    soup = BeautifulSoup(src)
                    body = soup.find('body')
                    if body is not None:
                        prefix = '/'.join('..'for i in range(2*len(re.findall(r'link\d+', last))))
                        templ = self.navbar.generate(True, num, j, len(f),
                                        not self.has_single_feed,
                                        a.orig_url, __appname__, prefix=prefix,
                                        center=self.center_navbar)
                        elem = BeautifulSoup(templ.render(doctype='xhtml').decode('utf-8')).find('div')
                        body.insert(len(body.contents), elem)
                        with open(last, 'wb') as fi:
                            fi.write(str(soup).encode('utf-8'))
    if len(feeds) == 0:
        raise Exception('All feeds are empty, aborting.')

    if len(feeds) > 1:
        # Multiple feeds: one TOC level per feed
        for i, f in enumerate(feeds):
            entries.append('feed_%d/index.html'%i)
            po = self.play_order_map.get(entries[-1], None)
            if po is None:
                self.play_order_counter += 1
                po = self.play_order_counter
            auth = getattr(f, 'author', None)
            if not auth:
                auth = None
            desc = getattr(f, 'description', None)
            if not desc:
                desc = None
            feed_index(i, toc.add_item('feed_%d/index.html'%i, None,
                f.title, play_order=po, description=desc, author=auth))

    else:
        # Single feed: articles go directly at the TOC top level
        entries.append('feed_%d/index.html'%0)
        feed_index(0, toc)

    for i, p in enumerate(entries):
        entries[i] = os.path.join(dir, p.replace('/', os.sep))
    opf.create_spine(entries)
    opf.set_toc(toc)

    with open(opf_path, 'wb') as opf_file, open(ncx_path, 'wb') as ncx_file:
        opf.render(opf_file, ncx_file)
def article_downloaded(self, request, result):
    '''
    Thread-pool success callback for an article fetch job.

    :param request: The WorkRequest; carries .article, .feed and
        .requestID == (feed_number, article_number)
    :param result: (path to main downloaded file, list of downloaded
        paths, failed links)
    '''
    # Normalize the main article file name to index.html
    index = os.path.join(os.path.dirname(result[0]), 'index.html')
    if index != result[0]:
        if os.path.exists(index):
            os.remove(index)
        os.rename(result[0], index)
    a = request.requestID[1]

    article = request.article
    self.log.debug('Downloaded article:', article.title, 'from', article.url)
    # Rewrite article.url to the local relative path, keeping the original
    article.orig_url = article.url
    article.url = 'article_%d/index.html'%a
    article.downloaded = True
    article.sub_pages = result[1][1:]
    self.jobs_done += 1
    self.report_progress(float(self.jobs_done)/len(self.jobs),
        _('Article downloaded: %s')%force_unicode(article.title))
    if result[2]:
        # Some linked resources failed; record as a partial failure
        self.partial_failures.append((request.feed.title, article.title, article.url, result[2]))
def error_in_article_download(self, request, traceback):
    '''Thread-pool failure callback: classify the failure as a deliberate
    abort (AbortArticle in the traceback text) or a real download error,
    log it and record it in aborted_articles/failed_downloads.'''
    self.jobs_done += 1
    article = request.article
    progress = float(self.jobs_done)/len(self.jobs)
    aborted = bool(traceback) and re.search(
        '^AbortArticle:', traceback, flags=re.M) is not None
    if aborted:
        self.log.warn('Aborted download of article:', article.title,
            'from', article.url)
        self.report_progress(progress,
            _('Article download aborted: %s')%force_unicode(article.title))
        self.aborted_articles.append((request.feed, article))
    else:
        self.log.error('Failed to download article:', article.title,
            'from', article.url)
        self.log.debug(traceback)
        self.log.debug('\n')
        self.report_progress(progress,
            _('Article download failed: %s')%force_unicode(article.title))
        self.failed_downloads.append((request.feed, article, traceback))
def parse_feeds(self):
    '''
    Create a list of articles from the list of feeds returned by :meth:`BasicNewsRecipe.get_feeds`.
    Return a list of :class:`Feed` objects.
    '''
    feeds = self.get_feeds()
    parsed_feeds = []
    br = self.browser
    for obj in feeds:
        # Feed entries are either a bare URL or a (title, url) tuple
        if isinstance(obj, string_or_bytes):
            title, url = None, obj
        else:
            title, url = obj
        if isinstance(title, bytes):
            title = title.decode('utf-8')
        if isinstance(url, bytes):
            url = url.decode('utf-8')
        if url.startswith('feed://'):
            url = 'http'+url[4:]
        self.report_progress(0, _('Fetching feed')+' %s...'%(title if title else url))
        try:
            purl = urlparse(url, allow_fragments=False)
            if purl.username or purl.password:
                # Move inline credentials out of the URL and into the
                # browser's password manager
                hostname = purl.hostname
                if purl.port:
                    hostname += f':{purl.port}'
                url = purl._replace(netloc=hostname).geturl()
                if purl.username and purl.password:
                    br.add_password(url, purl.username, purl.password)
            with closing(br.open_novisit(url, timeout=self.timeout)) as f:
                raw = f.read()
            parsed_feeds.append(feed_from_xml(
                raw, title=title, log=self.log,
                oldest_article=self.oldest_article,
                max_articles_per_feed=self.max_articles_per_feed,
                get_article_url=self.get_article_url
            ))
        except Exception as err:
            # A broken feed becomes an empty Feed whose description
            # carries the error, so the failure is visible in the output
            feed = Feed()
            msg = 'Failed feed: %s'%(title if title else url)
            feed.populate_from_preparsed_feed(msg, [])
            feed.description = as_unicode(err)
            parsed_feeds.append(feed)
            self.log.exception(msg)
        delay = self.get_url_specific_delay(url)
        if delay > 0:
            time.sleep(delay)

    remove = [fl for fl in parsed_feeds if len(fl) == 0 and self.remove_empty_feeds]
    for f in remove:
        parsed_feeds.remove(f)

    return parsed_feeds
@classmethod
def tag_to_string(self, tag, use_alt=True, normalize_whitespace=True):
    '''
    Convenience method to take a
    `BeautifulSoup <https://www.crummy.com/software/BeautifulSoup/bs4/doc/>`_
    :code:`Tag` and extract the text from it recursively, including any CDATA sections
    and alt tag attributes. Return a possibly empty Unicode string.

    `use_alt`: If `True` try to use the alt attribute for tags that don't
    have any textual content

    `tag`: `BeautifulSoup <https://www.crummy.com/software/BeautifulSoup/bs4/doc/>`_
    :code:`Tag`
    '''
    if tag is None:
        return ''
    if isinstance(tag, string_or_bytes):
        return tag
    if callable(getattr(tag, 'xpath', None)) and not hasattr(tag, 'contents'):  # a lxml tag
        from lxml.etree import tostring
        ans = tostring(tag, method='text', encoding='unicode', with_tail=False)
    else:
        strings = []
        for item in tag.contents:
            if isinstance(item, (NavigableString, CData)):
                strings.append(item.string)
            elif isinstance(item, Tag):
                # Recurse into child tags
                res = self.tag_to_string(item)
                if res:
                    strings.append(res)
            elif use_alt:
                try:
                    strings.append(item['alt'])
                except KeyError:
                    pass
        ans = ''.join(strings)
    if normalize_whitespace:
        ans = re.sub(r'\s+', ' ', ans)
    return ans
@classmethod
def soup(cls, raw):
    # Parse raw HTML with the BeautifulSoup wrapper used by this module.
    return BeautifulSoup(raw)
@classmethod
def adeify_images(cls, soup):
    '''
    If your recipe when converted to EPUB has problems with images when
    viewed in Adobe Digital Editions, call this method from within
    :meth:`postprocess_html`.
    '''
    for img in soup.findAll('img'):
        # Strip presentation attributes that confuse ADE
        for attrib in ('height', 'width', 'border', 'align', 'style'):
            try:
                del img[attrib]
            except KeyError:
                pass
        # Re-wrap the image as <div><img/><br/></div> at its old position
        parent = img.parent
        position = parent.contents.index(img)
        img.extract()
        wrapper = soup.new_tag('div')
        parent.insert(position, wrapper)
        wrapper.append(img)
        wrapper.append(soup.new_tag('br'))
    return soup
def internal_postprocess_book(self, oeb, opts, log):
    '''
    Rewrite <a href> links in the generated book that point at web URLs of
    articles which were themselves downloaded into this issue, so they
    become internal links to the local copies. Uses the article_url_map
    built in create_opf().
    '''
    if self.resolve_internal_links and self.article_url_map:
        seen = set()
        for item in oeb.spine:
            for a in item.data.xpath('//*[local-name()="a" and @href]'):
                # Links the fetcher added itself are left alone
                if a.get('rel') == 'calibre-downloaded-from':
                    continue
                url = a.get('href')
                for curl in self.canonicalize_internal_url(url):
                    articles = self.article_url_map.get(curl)
                    if articles:
                        # Deterministically pick one local copy
                        arelpath = sorted(articles, key=numeric_sort_key)[0]
                        a.set('href', item.relhref(arelpath))
                        if a.text and len(a) == 0:
                            a.text = a.text + '·'  # mark as local link
                        if url not in seen:
                            log.debug(f'Resolved internal URL: {url} -> {arelpath}')
                            seen.add(url)
class CustomIndexRecipe(BasicNewsRecipe):
    '''Recipe whose index page is a user-supplied local HTML document
    rather than being generated from feeds.'''

    def custom_index(self):
        '''
        Return the filesystem path to a custom HTML document that will serve as the index for
        this recipe. The index document will typically contain many `<a href="...">`
        tags that point to resources on the internet that should be downloaded.
        '''
        raise NotImplementedError

    def create_opf(self):
        # Minimal OPF: manifest from everything in output_dir, spine is
        # just the custom index page.
        mi = MetaInformation(self.title + strftime(self.timefmt), [__appname__])
        mi.publisher = __appname__
        mi.author_sort = __appname__
        mi = OPFCreator(self.output_dir, mi)
        mi.create_manifest_from_files_in([self.output_dir])
        mi.create_spine([os.path.join(self.output_dir, 'index.html')])
        with open(os.path.join(self.output_dir, 'index.opf'), 'wb') as opf_file:
            mi.render(opf_file)

    def download(self):
        # Fetch the custom index (and everything it links to) via the
        # recursive fetcher, then write the OPF.
        index = os.path.abspath(self.custom_index())
        url = 'file:'+index if iswindows else 'file://'+index
        self.web2disk_options.browser = self.clone_browser(self.browser)
        fetcher = RecursiveFetcher(self.web2disk_options, self.log)
        fetcher.base_dir = self.output_dir
        fetcher.current_dir = self.output_dir
        fetcher.show_progress = False
        res = fetcher.start_fetch(url)
        self.create_opf()
        return res
class AutomaticNewsRecipe(BasicNewsRecipe):
    '''A BasicNewsRecipe with automatic article content cleanup enabled.'''

    # Enable BasicNewsRecipe's automatic content cleanup for every article
    auto_cleanup = True
class CalibrePeriodical(BasicNewsRecipe):
    '''
    Base class for periodicals delivered via the calibre Periodicals
    service: the actual recipe is downloaded from the service at run time.
    Subclasses need only set :attr:`calibre_periodicals_slug`.
    '''

    #: Set this to the slug for the calibre periodical
    calibre_periodicals_slug = None

    LOG_IN = 'https://news.calibre-ebook.com/accounts/login'
    needs_subscription = True
    __author__ = 'calibre Periodicals'

    def get_browser(self):
        '''
        Return a browser logged in to the calibre Periodicals service.

        :raises LoginFailed: if the post-login page does not contain the
            account link, i.e. the credentials were rejected.
        '''
        br = BasicNewsRecipe.get_browser(self)
        br.open(self.LOG_IN)
        br.select_form(name='login')
        br['username'] = self.username
        br['password'] = self.password
        raw = br.submit().read()
        if isinstance(raw, bytes):
            # The response body is bytes; decode it so the substring test
            # below cannot raise TypeError (str needle in bytes haystack).
            raw = raw.decode('utf-8', 'replace')
        if 'href="/my-account"' not in raw:
            raise LoginFailed(
                _('Failed to log in, check your username and password for'
                  ' the calibre Periodicals service.'))
        return br
    get_browser.is_base_class_implementation = True

    def download(self):
        '''
        Download the pre-built recipe for this periodical, extract it into
        the current directory, adopt its conversion options and return the
        path to the generated index.html.

        :raises DownloadDenied: on an HTTP 403 response (expired
            subscription or download quota exceeded).
        '''
        self.log('Fetching downloaded recipe')
        try:
            raw = self.browser.open_novisit(
                'https://news.calibre-ebook.com/subscribed_files/%s/0/temp.downloaded_recipe'
                % self.calibre_periodicals_slug
            ).read()
        except Exception as e:
            if hasattr(e, 'getcode') and e.getcode() == 403:
                raise DownloadDenied(
                    _('You do not have permission to download this issue.'
                      ' Either your subscription has expired or you have'
                      ' exceeded the maximum allowed downloads for today.'))
            raise
        f = io.BytesIO(raw)
        from calibre.utils.zipfile import ZipFile
        zf = ZipFile(f)
        zf.extractall()
        zf.close()
        from glob import glob
        from calibre.web.feeds.recipes import compile_recipe
        try:
            # Use a context manager so the recipe file handle is closed
            with open(glob('*.recipe')[0], 'rb') as rf:
                recipe = compile_recipe(rf.read())
            self.conversion_options = recipe.conversion_options
        except Exception:
            self.log.exception('Failed to compile downloaded recipe')
        return os.path.abspath('index.html')
| 83,763 | Python | .py | 1,729 | 37.218623 | 160 | 0.593016 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,813 | collection.py | kovidgoyal_calibre/src/calibre/web/feeds/recipes/collection.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import calendar
import json
import os
import zipfile
from datetime import timedelta
from threading import RLock
from typing import Dict, NamedTuple, Optional, Sequence
from lxml import etree
from lxml.builder import ElementMaker
from calibre import force_unicode
from calibre.constants import numeric_version
from calibre.utils.date import EPOCH, UNDEFINED_DATE, isoformat, local_tz, utcnow
from calibre.utils.date import now as nowf
from calibre.utils.iso8601 import parse_iso8601
from calibre.utils.localization import _
from calibre.utils.recycle_bin import delete_file
from calibre.utils.resources import get_path as P
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.builtins import iteritems
# XML namespace used by all recipe-collection documents
NS = 'http://calibre-ebook.com/recipe_collection'
# Element factory creating elements in that namespace (as the default prefix)
E = ElementMaker(namespace=NS, nsmap={None:NS})
def iterate_over_builtin_recipe_files():
    '''Yield ``(recipe_id, path)`` for every builtin .recipe file in the
    source tree, skipping a small list of excluded recipes.'''
    excluded = ('craigslist', 'toronto_sun')
    # The recipes directory lives six levels above this module in the tree
    base = os.path.abspath(__file__)
    for _ in range(6):
        base = os.path.dirname(base)
    base = os.path.join(base, 'recipes')
    for name in os.listdir(base):
        stem, ext = os.path.splitext(name)
        if ext != '.recipe' or stem in excluded:
            continue
        path = os.path.join(base, name)
        rid = os.path.splitext(os.path.relpath(path, base).replace(os.sep, '/'))[0]
        yield rid, path
def normalize_language(x: str) -> str:
    '''Normalize a language code to ``lang`` or ``lang_COUNTRY`` form,
    e.g. ``en-us`` -> ``en_US`` and ``EN`` -> ``en``.'''
    lang, sep, country = x.replace('-', '_').partition('_')
    lang = lang.lower()
    return f'{lang}_{country.upper()}' if sep else lang
def serialize_recipe(urn, recipe_class):
    '''Serialize a recipe class into a single ``<recipe/>`` XML element,
    returned as a string, for inclusion in a recipe collection document.'''
    from xml.sax.saxutils import quoteattr

    def attr(name, default, normalize=lambda x: x):
        # Quoted attribute value for a recipe class attribute
        val = getattr(recipe_class, name, default)
        if isinstance(val, bytes):
            val = val.decode('utf-8', 'replace')
        return quoteattr(normalize(val))

    default_author = _('You') if urn.startswith('custom:') else _('Unknown')
    ns = getattr(recipe_class, 'needs_subscription', False)
    if not ns:
        ns = 'no'
    if ns is True:
        ns = 'yes'
    rso = getattr(recipe_class, 'recipe_specific_options', None)
    options = f' options={quoteattr(json.dumps(rso))}' if rso else ''
    return (' <recipe id={id} title={title} author={author} language={language}'
            ' needs_subscription={needs_subscription} description={description}{options}/>').format(
        id=quoteattr(str(urn)),
        title=attr('title', _('Unknown')),
        author=attr('__author__', default_author),
        language=attr('language', 'und', normalize_language),
        needs_subscription=quoteattr(ns),
        description=attr('description', ''),
        options=options,
    )
def serialize_collection(mapping_of_recipe_classes):
    '''
    Serialize a mapping of ``urn -> recipe class`` into a UTF-8 encoded
    ``<recipe_collection>`` XML document (bytes). Recipes that fail to
    serialize are skipped, with the traceback printed to stderr.
    '''
    def sort_key(urn):
        # Order recipes by title; untitled classes sort towards the end
        return force_unicode(
            getattr(mapping_of_recipe_classes[urn], 'title', 'zzz'), 'utf-8')

    collection = []
    for urn in sorted(mapping_of_recipe_classes.keys(), key=sort_key):
        try:
            recipe = serialize_recipe(urn, mapping_of_recipe_classes[urn])
        except Exception:
            import traceback
            traceback.print_exc()
            continue
        collection.append(recipe)
    items = '\n'.join(collection)
    return f'''<?xml version='1.0' encoding='utf-8'?>
<recipe_collection xmlns="http://calibre-ebook.com/recipe_collection" count="{len(collection)}">
{items}
</recipe_collection>'''.encode()
def serialize_builtin_recipes():
    '''Compile every builtin recipe file and serialize the resulting recipe
    classes into a recipe collection XML document (bytes).'''
    from calibre.web.feeds.recipes import compile_recipe
    recipe_mapping = {}
    for rid, path in iterate_over_builtin_recipe_files():
        with open(path, 'rb') as stream:
            raw = stream.read()
        try:
            recipe_class = compile_recipe(raw)
        except:
            print('Failed to compile: %s'%path)
            raise
        if recipe_class is not None:
            recipe_mapping['builtin:'+rid] = recipe_class
    return serialize_collection(recipe_mapping)
def get_builtin_recipe_collection():
    # Parse the pre-built builtin_recipes.xml resource shipped with calibre
    # and return its root <recipe_collection> element
    return etree.parse(P('builtin_recipes.xml', allow_user_override=False)).getroot()
def get_custom_recipe_collection(*args):
    '''
    Compile all user created (custom) recipes and return them serialized as
    the root element of a recipe collection XML document. Broken recipes
    are skipped with the error printed, so one bad recipe cannot hide the
    rest. Positional arguments are accepted and ignored.
    '''
    from calibre.web.feeds.recipes import compile_recipe, custom_recipes
    bdir = os.path.dirname(custom_recipes.file_path)
    rmap = {}
    for id_, x in iteritems(custom_recipes):
        title, fname = x
        path = os.path.join(bdir, fname)
        try:
            with open(path, 'rb') as f:
                src = f.read().decode('utf-8')
            recipe_class = compile_recipe(src)
            if recipe_class is not None:
                rmap['custom:%s'%id_] = recipe_class
        except Exception:
            print('Failed to load recipe from: %r'%fname)
            import traceback
            traceback.print_exc()
            continue
    return safe_xml_fromstring(serialize_collection(rmap), recover=False)
def update_custom_recipe(id_, title, script):
    # Convenience wrapper around update_custom_recipes() for a single recipe
    update_custom_recipes([(id_, title, script)])
def update_custom_recipes(script_ids):
    '''
    Create or update custom recipes.

    :param script_ids: Iterable of ``(id, title, script)`` triples, where
        the recipe source ``script`` may be str or UTF-8 encoded bytes.
    '''
    from calibre.web.feeds.recipes import custom_recipe_filename, custom_recipes
    bdir = os.path.dirname(custom_recipes.file_path)
    # Ensure the recipes directory exists, race-free, once up front
    os.makedirs(bdir, exist_ok=True)
    for id_, title, script in script_ids:
        id_ = str(int(id_))
        existing = custom_recipes.get(id_, None)
        # Re-use the stored filename when updating, so the file does not
        # move when the recipe's title changes
        fname = custom_recipe_filename(id_, title) if existing is None else existing[1]
        if isinstance(script, str):
            script = script.encode('utf-8')
        custom_recipes[id_] = (title, fname)
        with open(os.path.join(bdir, fname), 'wb') as f:
            f.write(script)
def add_custom_recipe(title, script):
    # Convenience wrapper around add_custom_recipes() for a single recipe
    add_custom_recipes({title:script})
def add_custom_recipes(script_map):
    '''
    Add new custom recipes.

    :param script_map: Mapping of ``title -> recipe source`` (str or UTF-8
        encoded bytes). Each new recipe gets the next free numeric id,
        starting at 1000.
    '''
    from calibre.web.feeds.recipes import custom_recipe_filename, custom_recipes
    id_ = 1000
    existing_ids = tuple(map(int, custom_recipes))
    if existing_ids:
        id_ = max(existing_ids)+1
    bdir = os.path.dirname(custom_recipes.file_path)
    # Ensure the recipes directory exists, race-free, once up front
    os.makedirs(bdir, exist_ok=True)
    # NOTE(review): the context manager on custom_recipes appears to batch
    # the config updates for the whole loop -- confirm in JSONConfig
    with custom_recipes:
        for title, script in iteritems(script_map):
            fid = str(id_)
            fname = custom_recipe_filename(fid, title)
            if isinstance(script, str):
                script = script.encode('utf-8')
            custom_recipes[fid] = (title, fname)
            with open(os.path.join(bdir, fname), 'wb') as f:
                f.write(script)
            id_ += 1
def remove_custom_recipe(id_):
    '''Remove the custom recipe with the given numeric id, deleting its
    source file as well. Does nothing if no such recipe exists.'''
    from calibre.web.feeds.recipes import custom_recipes
    id_ = str(int(id_))
    existing = custom_recipes.get(id_, None)
    if existing is not None:
        bdir = os.path.dirname(custom_recipes.file_path)
        fname = existing[1]
        del custom_recipes[id_]
        try:
            delete_file(os.path.join(bdir, fname))
        except Exception:
            # Best effort: the entry is already gone from the config
            pass
def get_custom_recipe(id_):
    '''Return the source of the custom recipe with the given numeric id as
    str, or None if there is no such recipe.'''
    from calibre.web.feeds.recipes import custom_recipes
    entry = custom_recipes.get(str(int(id_)), None)
    if entry is None:
        return None
    bdir = os.path.dirname(custom_recipes.file_path)
    with open(os.path.join(bdir, entry[1]), 'rb') as f:
        return f.read().decode('utf-8')
def get_builtin_recipe_titles():
    # Titles of all builtin recipes, read from the shipped collection XML
    return [r.get('title') for r in get_builtin_recipe_collection()]
def download_builtin_recipe(urn):
    '''
    Download the latest version of the builtin recipe identified by ``urn``
    from the calibre servers and return its source as str.

    :raises ValueError: if the downloaded source contains no recipe class
        or requires a newer version of calibre.
    '''
    import bz2
    from calibre.utils.config_base import prefs
    from calibre.utils.https import get_https_resource_securely
    url = 'https://code.calibre-ebook.com/recipe-compressed/'+urn
    headers = {'CALIBRE-INSTALL-UUID':prefs['installation_uuid']}
    recipe_source = bz2.decompress(get_https_resource_securely(url, headers=headers)).decode('utf-8')
    from calibre.web.feeds.recipes import compile_recipe
    # Compiling verifies the download is at least syntactically usable
    recipe = compile_recipe(recipe_source)
    if recipe is None:
        raise ValueError('Failed to find recipe object in downloaded recipe: ' + urn)
    if recipe.requires_version > numeric_version:
        raise ValueError(f'Downloaded recipe for {urn} requires calibre >= {recipe.requires_version}')
    return recipe_source
def get_builtin_recipe(urn):
    # Read the bundled recipe source from the builtin_recipes.zip resource
    with zipfile.ZipFile(P('builtin_recipes.zip', allow_user_override=False), 'r') as zf:
        return zf.read(urn+'.recipe').decode('utf-8')
def _builtin_recipe_source(urn, log=None, download_recipe=False):
    '''
    Return the source of the builtin recipe with the given urn. When
    ``download_recipe`` is True, try to fetch the latest version from the
    calibre servers first, falling back to the bundled copy on any error.
    '''
    if download_recipe:
        try:
            if log is not None:
                log('Trying to get latest version of recipe:', urn)
            return download_builtin_recipe(urn)
        except Exception:
            if log is None:
                import traceback
                traceback.print_exc()
            else:
                log.exception(
                    'Failed to download recipe, using builtin version')
    return get_builtin_recipe(urn)


def get_builtin_recipe_by_title(title, log=None, download_recipe=False):
    '''Return the source of the builtin recipe with the given title, or
    None if there is no such recipe.'''
    for x in get_builtin_recipe_collection():
        if x.get('title') == title:
            # Strip the 'builtin:' prefix from the collection id to get the urn
            return _builtin_recipe_source(x.get('id')[8:], log=log,
                    download_recipe=download_recipe)


def get_builtin_recipe_by_id(id_, log=None, download_recipe=False):
    '''Return the source of the builtin recipe with the given collection id
    (e.g. ``builtin:bbc``), or None if there is no such recipe.'''
    for x in get_builtin_recipe_collection():
        if x.get('id') == id_:
            return _builtin_recipe_source(x.get('id')[8:], log=log,
                    download_recipe=download_recipe)
class RecipeCustomization(NamedTuple):
    '''Per-recipe download customizations persisted in scheduler.xml.'''
    add_title_tag: bool = False  # add the recipe title as a tag on the book
    custom_tags: Sequence[str] = ()  # extra user-specified tags
    keep_issues: int = 0  # stored 'keep_issues' setting (presumably how many old issues to retain -- confirm in callers)
    recipe_specific_options: Optional[Dict[str, str]] = None
class SchedulerConfig:
    '''
    Manage the recipe download scheduling configuration. State is kept as
    an XML tree (in the recipe_collection namespace) and persisted to
    scheduler.xml in the calibre config directory. An RLock serializes all
    in-process access; an exclusive file lock guards reads and writes of
    the file itself.
    '''

    def __init__(self):
        from calibre.utils.config import config_dir
        from calibre.utils.lock import ExclusiveFile
        self.conf_path = os.path.join(config_dir, 'scheduler.xml')
        old_conf_path = os.path.join(config_dir, 'scheduler.pickle')
        self.root = E.recipe_collection()
        self.lock = RLock()
        if os.access(self.conf_path, os.R_OK):
            with ExclusiveFile(self.conf_path) as f:
                try:
                    self.root = safe_xml_fromstring(f.read(), recover=False)
                except Exception:
                    print('Failed to read recipe scheduler config')
                    import traceback
                    traceback.print_exc()
        elif os.path.exists(old_conf_path):
            # One-time migration from the old pickle based configuration
            self.migrate_old_conf(old_conf_path)

    def iter_recipes(self):
        # All <scheduled_recipe> children of the root
        for x in self.root:
            if x.tag == '{%s}scheduled_recipe'%NS:
                yield x

    def iter_accounts(self):
        # All <account_info> children of the root
        for x in self.root:
            if x.tag == '{%s}account_info'%NS:
                yield x

    def iter_customization(self):
        # All <recipe_customization> children of the root
        for x in self.root:
            if x.tag == '{%s}recipe_customization'%NS:
                yield x

    def schedule_recipe(self, recipe, schedule_type, schedule, last_downloaded=None):
        '''
        Add or replace the schedule for ``recipe`` (an object providing
        'id' and 'title' via .get()). If the recipe was already scheduled,
        its recorded last_downloaded timestamp is preserved unless an
        explicit one is passed in.
        '''
        with self.lock:
            for x in list(self.iter_recipes()):
                if x.get('id', False) == recipe.get('id'):
                    ld = x.get('last_downloaded', None)
                    if ld and last_downloaded is None:
                        try:
                            last_downloaded = parse_iso8601(ld)
                        except Exception:
                            pass
                    self.root.remove(x)
                    break
            if last_downloaded is None:
                last_downloaded = EPOCH
            sr = E.scheduled_recipe({
                'id' : recipe.get('id'),
                'title': recipe.get('title'),
                'last_downloaded':isoformat(last_downloaded),
                }, self.serialize_schedule(schedule_type, schedule))
            self.root.append(sr)
            self.write_scheduler_file()

    def customize_recipe(self, urn, val: RecipeCustomization):
        # Store per-recipe customization (keep_issues, title tag, custom
        # tags, recipe specific options), replacing any existing entry
        with self.lock:
            for x in list(self.iter_customization()):
                if x.get('id') == urn:
                    self.root.remove(x)
            cs = E.recipe_customization({
                'keep_issues' : str(val.keep_issues),
                'id' : urn,
                'add_title_tag' : 'yes' if val.add_title_tag else 'no',
                'custom_tags' : ','.join(val.custom_tags),
                'recipe_specific_options': json.dumps(val.recipe_specific_options or {}),
            })
            self.root.append(cs)
            self.write_scheduler_file()

    def un_schedule_recipe(self, recipe_id):
        # Remove the schedule entry for the given recipe, if any
        with self.lock:
            for x in list(self.iter_recipes()):
                if x.get('id', False) == recipe_id:
                    self.root.remove(x)
                    break
            self.write_scheduler_file()

    def update_last_downloaded(self, recipe_id):
        with self.lock:
            now = utcnow()
            for x in self.iter_recipes():
                if x.get('id', False) == recipe_id:
                    typ, sch, last_downloaded = self.un_serialize_schedule(x)
                    if typ == 'interval':
                        # Prevent downloads more frequent than once an hour:
                        # if this download happened close to schedule, record
                        # the nominal time instead of the actual one
                        actual_interval = now - last_downloaded
                        nominal_interval = timedelta(days=sch)
                        if abs(actual_interval - nominal_interval) < \
                                timedelta(hours=1):
                            now = last_downloaded + nominal_interval
                    x.set('last_downloaded', isoformat(now))
                    break
            self.write_scheduler_file()

    def get_to_be_downloaded_recipes(self):
        # Urns of all scheduled recipes whose schedule has come due
        ans = []
        with self.lock:
            for recipe in self.iter_recipes():
                if self.recipe_needs_to_be_downloaded(recipe):
                    ans.append(recipe.get('id'))
        return ans

    def write_scheduler_file(self):
        from calibre.utils.lock import ExclusiveFile
        # Hand-rolled pretty printing: blank line between top-level elements
        self.root.text = '\n\n\t'
        for x in self.root:
            x.tail = '\n\n\t'
        if len(self.root) > 0:
            self.root[-1].tail = '\n\n'
        with ExclusiveFile(self.conf_path) as f:
            f.seek(0)
            f.truncate()
            f.write(etree.tostring(self.root, encoding='utf-8',
                xml_declaration=True, pretty_print=False))

    def serialize_schedule(self, typ, schedule):
        '''
        Serialize a schedule into a <schedule> element. ``schedule`` is a
        float number of days for 'interval', a (day, hour, minute) tuple
        for 'day/time' and a (days, hour, minute) sequence for
        'days_of_week'/'days_of_month'.
        '''
        s = E.schedule({'type':typ})
        if typ == 'interval':
            if schedule < 0.04:
                # Clamp the interval to at least 0.04 days (~1 hour)
                schedule = 0.04
            text = '%f'%schedule
        elif typ == 'day/time':
            text = '%d:%d:%d'%schedule
        elif typ in ('days_of_week', 'days_of_month'):
            dw = ','.join(map(str, map(int, schedule[0])))
            text = '%s:%d:%d'%(dw, schedule[1], schedule[2])
        else:
            raise ValueError('Unknown schedule type: %r'%typ)
        s.text = text
        return s

    def un_serialize_schedule(self, recipe):
        '''
        Inverse of serialize_schedule(): return (type, schedule,
        last_downloaded) for a <scheduled_recipe> element, or None if it
        has no <schedule> descendant.
        '''
        for x in recipe.iterdescendants():
            if 'schedule' in x.tag:
                sch, typ = x.text, x.get('type')
                if typ == 'interval':
                    sch = float(sch)
                elif typ == 'day/time':
                    sch = list(map(int, sch.split(':')))
                elif typ in ('days_of_week', 'days_of_month'):
                    parts = sch.split(':')
                    days = list(map(int, [d.strip() for d in
                        parts[0].split(',')]))
                    sch = [days, int(parts[1]), int(parts[2])]
                try:
                    ld = parse_iso8601(recipe.get('last_downloaded'))
                except Exception:
                    ld = UNDEFINED_DATE
                return typ, sch, ld

    def recipe_needs_to_be_downloaded(self, recipe):
        '''
        Return True if the <scheduled_recipe> element is due for download
        according to its schedule and last_downloaded timestamp. Any error
        while interpreting the schedule counts as "not due".
        '''
        try:
            typ, sch, ld = self.un_serialize_schedule(recipe)
        except Exception:
            return False

        def is_time(now, hour, minute):
            return now.hour > hour or \
                    (now.hour == hour and now.minute >= minute)

        def is_weekday(day, now):
            # A day outside 0-6 means "any day of the week"
            return day < 0 or day > 6 or \
                    day == calendar.weekday(now.year, now.month, now.day)

        def was_downloaded_already_today(ld_local, now):
            return ld_local.date() == now.date()

        if typ == 'interval':
            return utcnow() - ld > timedelta(sch)
        elif typ == 'day/time':
            now = nowf()
            try:
                ld_local = ld.astimezone(local_tz)
            except Exception:
                return False
            day, hour, minute = sch
            return is_weekday(day, now) and \
                    not was_downloaded_already_today(ld_local, now) and \
                    is_time(now, hour, minute)
        elif typ == 'days_of_week':
            now = nowf()
            try:
                ld_local = ld.astimezone(local_tz)
            except Exception:
                return False
            days, hour, minute = sch
            have_day = False
            for day in days:
                if is_weekday(day, now):
                    have_day = True
                    break
            return have_day and \
                    not was_downloaded_already_today(ld_local, now) and \
                    is_time(now, hour, minute)
        elif typ == 'days_of_month':
            now = nowf()
            try:
                ld_local = ld.astimezone(local_tz)
            except Exception:
                return False
            days, hour, minute = sch
            have_day = now.day in days
            return have_day and \
                    not was_downloaded_already_today(ld_local, now) and \
                    is_time(now, hour, minute)

        return False

    def set_account_info(self, urn, un, pw):
        # Store (replacing) the username/password for a subscription recipe
        with self.lock:
            for x in list(self.iter_accounts()):
                if x.get('id', False) == urn:
                    self.root.remove(x)
                    break
            ac = E.account_info({'id':urn, 'username':un, 'password':pw})
            self.root.append(ac)
            self.write_scheduler_file()

    def get_account_info(self, urn):
        # Return (username, password), or None if nothing is stored
        with self.lock:
            for x in self.iter_accounts():
                if x.get('id', False) == urn:
                    return x.get('username', ''), x.get('password', '')

    def clear_account_info(self, urn):
        with self.lock:
            for x in self.iter_accounts():
                if x.get('id', False) == urn:
                    x.getparent().remove(x)
                    self.write_scheduler_file()
                    break

    def get_customize_info(self, urn):
        # Stored customizations for a recipe, with defaults for recipes
        # that have never been customized
        keep_issues = 0
        add_title_tag = True
        custom_tags = ()
        recipe_specific_options = {}
        with self.lock:
            for x in self.iter_customization():
                if x.get('id', False) == urn:
                    keep_issues = int(x.get('keep_issues', '0'))
                    add_title_tag = x.get('add_title_tag', 'yes') == 'yes'
                    custom_tags = tuple(i.strip() for i in x.get('custom_tags', '').split(','))
                    recipe_specific_options = json.loads(x.get('recipe_specific_options', '{}'))
                    break
        return RecipeCustomization(add_title_tag, custom_tags, keep_issues, recipe_specific_options)

    def get_schedule_info(self, urn):
        # [type, schedule, last_downloaded] for a scheduled recipe, or None
        with self.lock:
            for x in self.iter_recipes():
                if x.get('id', False) == urn:
                    ans = list(self.un_serialize_schedule(x))
                    return ans

    def migrate_old_conf(self, old_conf_path):
        '''Migrate the legacy pickle based scheduler config into the XML
        format, then delete the old file (best effort).'''
        from calibre.utils.config import DynamicConfig
        c = DynamicConfig('scheduler')
        for r in c.get('scheduled_recipes', []):
            try:
                self.add_old_recipe(r)
            except Exception:
                continue
        for k in c.keys():
            if k.startswith('recipe_account_info'):
                try:
                    urn = k.replace('recipe_account_info_', '')
                    if urn.startswith('recipe_'):
                        urn = 'builtin:'+urn[7:]
                    else:
                        urn = 'custom:%d'%int(urn)
                    try:
                        username, password = c[k]
                    except Exception:
                        username = password = ''
                    self.set_account_info(urn, str(username),
                            str(password))
                except Exception:
                    continue
        del c
        self.write_scheduler_file()
        try:
            os.remove(old_conf_path)
        except Exception:
            pass

    def add_old_recipe(self, r):
        '''Convert a single legacy scheduled-recipe record to the XML form.'''
        urn = None
        if r['builtin'] and r['id'].startswith('recipe_'):
            urn = 'builtin:'+r['id'][7:]
        elif not r['builtin']:
            try:
                urn = 'custom:%d'%int(r['id'])
            except Exception:
                return
        schedule = r['schedule']
        typ = 'interval'
        if schedule > 1e5:
            # Legacy packed day/time value: digits decoded one-based from
            # fixed positions (the second digit is unused)
            typ = 'day/time'
            raw = '%d'%int(schedule)
            day = int(raw[0]) - 1
            hour = int(raw[2:4]) - 1
            minute = int(raw[-2:]) - 1
            if day >= 7:
                day = -1
            schedule = [day, hour, minute]
        recipe = {'id':urn, 'title':r['title']}
        self.schedule_recipe(recipe, typ, schedule,
                last_downloaded=r['last_downloaded'])
| 22,209 | Python | .py | 529 | 30.15879 | 126 | 0.547884 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,814 | __init__.py | kovidgoyal_calibre/src/calibre/web/feeds/recipes/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Builtin recipes.
'''
import io
import re
import time
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.utils.config import JSONConfig
from calibre.web.feeds.news import AutomaticNewsRecipe, BasicNewsRecipe, CalibrePeriodical, CustomIndexRecipe
from polyglot.builtins import codepoint_to_chr, itervalues
# Recipe base classes exposed to compiled recipe source; subclasses of
# these (other than the bases themselves) are the actual recipes
basic_recipes = (BasicNewsRecipe, AutomaticNewsRecipe, CustomIndexRecipe,
        CalibrePeriodical)

# Persistent index of user created recipes: maps id -> (title, filename)
custom_recipes = JSONConfig('custom_recipes/index.json')
def custom_recipe_filename(id_, title):
    '''Generate the on-disk filename for a custom recipe: an ASCII-safe
    version of the (truncated) title followed by ``_<id>.recipe``.'''
    from calibre.utils.filenames import ascii_filename
    suffix = '_%s.recipe'%id_
    return ascii_filename(title[:50]) + suffix
def compile_recipe(src):
    '''
    Compile the code in src and return a recipe object, if found.

    :param src: Python source code as bytestring or unicode object
    :return: Recipe class or None, if no such class was found in src
    '''
    if not isinstance(src, str):
        # Honor an explicit PEP 263 coding declaration near the top of the
        # source, defaulting to UTF-8
        match = re.search(br'coding[:=]\s*([-\w.]+)', src[:200])
        enc = match.group(1).decode('utf-8') if match else 'utf-8'
        src = src.decode(enc)
    # Python complains if there is a coding declaration in a unicode string
    src = re.sub(r'^#.*coding\s*[:=]\s*([-\w.]+)', '#', src.lstrip('\ufeff'), flags=re.MULTILINE)
    # Translate newlines to \n
    src = io.StringIO(src, newline=None).getvalue()
    # Execute the recipe source with the names recipes commonly rely on
    # (including Python 2 compatibility aliases) pre-defined
    namespace = {
        'BasicNewsRecipe':BasicNewsRecipe,
        'AutomaticNewsRecipe':AutomaticNewsRecipe,
        'time':time, 're':re,
        'BeautifulSoup':BeautifulSoup,
        'unicode': str,
        'unichr': codepoint_to_chr,
        'xrange': range,
    }
    exec(src, namespace)
    ua = namespace.get('calibre_most_common_ua')
    # Return the first BasicNewsRecipe subclass defined by the source
    # itself (the pre-seeded base classes are excluded)
    for x in itervalues(namespace):
        if (isinstance(x, type) and issubclass(x, BasicNewsRecipe) and x not
                in basic_recipes):
            x.calibre_most_common_ua = ua
            return x
    return None
| 2,081 | Python | .py | 51 | 34.45098 | 109 | 0.667328 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,815 | model.py | kovidgoyal_calibre/src/calibre/web/feeds/recipes/model.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2009, Kovid Goyal <kovid at kovidgoyal.net>
import copy
import zipfile
from functools import total_ordering
from qt.core import QAbstractItemModel, QApplication, QFont, QIcon, QModelIndex, QPalette, QPixmap, Qt, pyqtSignal
from calibre import force_unicode
from calibre.utils.icu import primary_sort_key
from calibre.utils.localization import _, countrycode_to_name, get_language
from calibre.utils.resources import get_path as P
from calibre.utils.search_query_parser import ParseException, SearchQueryParser
from calibre.web.feeds.recipes.collection import (
SchedulerConfig,
add_custom_recipe,
add_custom_recipes,
download_builtin_recipe,
get_builtin_recipe,
get_builtin_recipe_collection,
get_custom_recipe,
get_custom_recipe_collection,
remove_custom_recipe,
update_custom_recipe,
update_custom_recipes,
)
from polyglot.builtins import iteritems
class NewsTreeItem:
    '''Base node of the recipe tree shown in the scheduler dialog.'''

    def __init__(self, builtin, custom, scheduler_config, parent=None):
        self.builtin, self.custom = builtin, custom
        self.scheduler_config = scheduler_config
        self.parent = parent
        if parent is not None:
            parent.append(self)
        self.children = []

    def row(self):
        # Position of this node among its siblings (0 for the root)
        return 0 if self.parent is None else self.parent.children.index(self)

    def append(self, child):
        child.parent = self
        self.children.append(child)

    def data(self, role):
        # Leaf classes override this to supply Qt item data
        return None

    def flags(self):
        return Qt.ItemFlag.ItemIsEnabled|Qt.ItemFlag.ItemIsSelectable

    def sort(self):
        # Recursively sort the subtree rooted at this node
        self.children.sort()
        for child in self.children:
            child.sort()

    def prune(self):
        # Drop childless children (e.g. empty category nodes)
        for child in list(self.children):
            if not child.children:
                self.children.remove(child)
                child.parent = None
def parse_lang_code(x: str) -> str:
    '''Convert a ``lang`` or ``lang_COUNTRY`` code into a localized, human
    readable display name.'''
    lang, _sep, country = x.partition('_')
    ans = get_language(lang)
    country = country.upper()
    if country:
        ans = _('{language} ({country})').format(language=ans, country=countrycode_to_name(country))
    return ans
@total_ordering
class NewsCategory(NewsTreeItem):
    '''A category node: Scheduled, Custom, or a per-language grouping.'''

    def __init__(self, category, builtin, custom, scheduler_config, parent):
        NewsTreeItem.__init__(self, builtin, custom, scheduler_config, parent)
        # cdata is the displayed text; for language categories it is the
        # human readable language name
        self.category = self.cdata = category
        # Fixed categories sort first, then languages by display name
        if self.category == _('Scheduled'):
            self.sortq = 0, ''
        elif self.category == _('Custom'):
            self.sortq = 1, ''
        else:
            self.cdata = parse_lang_code(self.cdata)
            self.sortq = 2, self.cdata
        self.bold_font = QFont()
        self.bold_font.setBold(True)

    def data(self, role):
        if role == Qt.ItemDataRole.DisplayRole:
            # Show the category name with its child count
            return (self.cdata + ' [%d]'%len(self.children))
        elif role == Qt.ItemDataRole.FontRole:
            return self.bold_font
        elif role == Qt.ItemDataRole.ForegroundRole and self.category == _('Scheduled'):
            return QApplication.instance().palette().color(QPalette.ColorRole.Link)
        elif role == Qt.ItemDataRole.UserRole:
            return f'::category::{self.sortq[0]}'
        return None

    def flags(self):
        # Categories are enabled but not selectable
        return Qt.ItemFlag.ItemIsEnabled

    def __eq__(self, other):
        return self.cdata == other.cdata

    def __lt__(self, other):
        return self.sortq < getattr(other, 'sortq', (3, ''))
@total_ordering
class NewsItem(NewsTreeItem):
    '''A leaf node representing a single recipe.'''

    def __init__(self, urn, title, default_icon, custom_icon, favicons, zf,
            builtin, custom, scheduler_config, parent):
        NewsTreeItem.__init__(self, builtin, custom, scheduler_config, parent)
        self.urn, self.title = urn, title
        if isinstance(self.title, bytes):
            self.title = force_unicode(self.title)
        # Sort key computed once: recipes are ordered by title
        self.sortq = primary_sort_key(self.title)
        self.icon = self.default_icon = None
        self.default_icon = default_icon
        self.favicons, self.zf = favicons, zf
        if 'custom:' in self.urn:
            # Custom recipes always use the user-profile icon
            self.icon = custom_icon

    def data(self, role):
        if role == Qt.ItemDataRole.DisplayRole:
            return (self.title)
        if role == Qt.ItemDataRole.DecorationRole:
            # Lazily load the recipe favicon from the builtin recipes zip on
            # first use, falling back to the default news icon
            if self.icon is None:
                icon = '%s.png'%self.urn[8:]
                p = QPixmap()
                if icon in self.favicons:
                    try:
                        with zipfile.ZipFile(self.zf, 'r') as zf:
                            p.loadFromData(zf.read(self.favicons[icon]))
                    except Exception:
                        pass
                if not p.isNull():
                    self.icon = (QIcon(p))
                else:
                    self.icon = self.default_icon
            return self.icon
        if role == Qt.ItemDataRole.UserRole:
            return self.urn

    def __eq__(self, other):
        return self.urn == other.urn

    def __lt__(self, other):
        return self.sortq < other.sortq
class AdaptSQP(SearchQueryParser):
    '''
    Adapter that turns SearchQueryParser.__init__ into a no-op, so that
    RecipeModel (which inherits from both QAbstractItemModel and this
    class) can invoke SearchQueryParser.__init__ explicitly itself.
    '''

    def __init__(self, *args, **kwargs):
        pass
class RecipeModel(QAbstractItemModel, AdaptSQP):
LOCATIONS = ['all']
searched = pyqtSignal(object)
def __init__(self, *args):
QAbstractItemModel.__init__(self, *args)
SearchQueryParser.__init__(self, locations=['all'])
self.default_icon = (QIcon.ic('news.png'))
self.custom_icon = (QIcon.ic('user_profile.png'))
self.builtin_recipe_collection = get_builtin_recipe_collection()
self.scheduler_config = SchedulerConfig()
try:
with zipfile.ZipFile(P('builtin_recipes.zip',
allow_user_override=False), 'r') as zf:
self.favicons = {x.filename: x for x in zf.infolist() if
x.filename.endswith('.png')}
except:
self.favicons = {}
self.do_refresh()
def get_builtin_recipe(self, urn, download=True):
if download:
try:
return download_builtin_recipe(urn)
except:
import traceback
traceback.print_exc()
return get_builtin_recipe(urn)
def get_recipe(self, urn, download=True):
coll = self.custom_recipe_collection if urn.startswith('custom:') \
else self.builtin_recipe_collection
for recipe in coll:
if recipe.get('id', False) == urn:
if coll is self.builtin_recipe_collection:
return self.get_builtin_recipe(urn[8:], download=download)
return get_custom_recipe(int(urn[len('custom:'):]))
def update_custom_recipe(self, urn, title, script):
id_ = int(urn[len('custom:'):])
update_custom_recipe(id_, title, script)
self.custom_recipe_collection = get_custom_recipe_collection()
def update_custom_recipes(self, script_urn_map):
script_ids = []
for urn, title_script in iteritems(script_urn_map):
id_ = int(urn[len('custom:'):])
(title, script) = title_script
script_ids.append((id_, title, script))
update_custom_recipes(script_ids)
self.custom_recipe_collection = get_custom_recipe_collection()
def add_custom_recipe(self, title, script):
add_custom_recipe(title, script)
self.custom_recipe_collection = get_custom_recipe_collection()
def add_custom_recipes(self, scriptmap):
add_custom_recipes(scriptmap)
self.custom_recipe_collection = get_custom_recipe_collection()
def remove_custom_recipes(self, urns):
ids = [int(x[len('custom:'):]) for x in urns]
for id_ in ids:
remove_custom_recipe(id_)
self.custom_recipe_collection = get_custom_recipe_collection()
def do_refresh(self, restrict_to_urns=frozenset()):
self.custom_recipe_collection = get_custom_recipe_collection()
zf = P('builtin_recipes.zip', allow_user_override=False)
def factory(cls, parent, *args):
args = list(args)
if cls is NewsItem:
args.extend([self.default_icon, self.custom_icon,
self.favicons, zf])
args += [self.builtin_recipe_collection,
self.custom_recipe_collection, self.scheduler_config,
parent]
return cls(*args)
def ok(urn):
if restrict_to_urns is None:
return False
return not restrict_to_urns or urn in restrict_to_urns
new_root = factory(NewsTreeItem, None)
scheduled = factory(NewsCategory, new_root, _('Scheduled'))
custom = factory(NewsCategory, new_root, _('Custom'))
lang_map = {}
self.all_urns = set()
self.showing_count = 0
self.builtin_count = 0
for x in self.custom_recipe_collection:
urn = x.get('id')
self.all_urns.add(urn)
if ok(urn):
factory(NewsItem, custom, urn, x.get('title'))
self.showing_count += 1
for x in self.builtin_recipe_collection:
urn = x.get('id')
self.all_urns.add(urn)
if ok(urn):
lang = x.get('language', 'und')
if lang:
lang = lang.replace('-', '_')
if lang not in lang_map:
lang_map[lang] = factory(NewsCategory, new_root, lang)
factory(NewsItem, lang_map[lang], urn, x.get('title'))
self.showing_count += 1
self.builtin_count += 1
for x in self.scheduler_config.iter_recipes():
urn = x.get('id')
if urn not in self.all_urns:
self.scheduler_config.un_schedule_recipe(urn)
continue
if ok(urn):
factory(NewsItem, scheduled, urn, x.get('title'))
new_root.prune()
new_root.sort()
self.root = new_root
self.reset()
def reset(self):
self.beginResetModel(), self.endResetModel()
def recipe_from_urn(self, urn):
coll = self.custom_recipe_collection if 'custom:' in urn else \
self.builtin_recipe_collection
for x in coll:
if x.get('id', None) == urn:
return copy.deepcopy(x)
def schedule_info_from_urn(self, urn):
return self.scheduler_config.get_schedule_info(urn)
def account_info_from_urn(self, urn):
return self.scheduler_config.get_account_info(urn)
def universal_set(self):
return self.all_urns
def get_customize_info(self, urn):
return self.scheduler_config.get_customize_info(urn)
def get_recipe_specific_option_metadata(self, urn):
return self.scheduler_config.get_recipe_specific_option_metadata(urn)
def get_matches(self, location, query):
query = query.strip().lower()
if not query:
return self.universal_set()
results = set()
for urn in self.universal_set():
recipe = self.recipe_from_urn(urn)
if query in recipe.get('title', '').lower() or \
query in recipe.get('description', '').lower():
results.add(urn)
return results
def search(self, query):
results = []
try:
query = str(query).strip()
if query:
results = self.parse(query)
if not results:
results = None
except ParseException:
results = []
self.do_refresh(restrict_to_urns=results)
self.searched.emit(True)
def columnCount(self, parent):
return 1
def data(self, index, role):
if not index.isValid():
return None
item = index.internalPointer()
return item.data(role)
def headerData(self, *args):
return None
def flags(self, index):
if not index.isValid():
return Qt.ItemFlag.ItemIsEnabled|Qt.ItemFlag.ItemIsSelectable
item = index.internalPointer()
return item.flags()
def resort(self):
self.do_refresh()
def index(self, row, column, parent):
if not self.hasIndex(row, column, parent):
return QModelIndex()
if not parent.isValid():
parent_item = self.root
else:
parent_item = parent.internalPointer()
try:
child_item = parent_item.children[row]
except IndexError:
return QModelIndex()
ans = self.createIndex(row, column, child_item)
return ans
def parent(self, index):
if not index.isValid():
return QModelIndex()
child_item = index.internalPointer()
parent_item = child_item.parent
if parent_item is self.root or parent_item is None:
return QModelIndex()
ans = self.createIndex(parent_item.row(), 0, parent_item)
return ans
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parent_item = self.root
else:
parent_item = parent.internalPointer()
return len(parent_item.children)
def update_recipe_schedule(self, urn, schedule_type, schedule,
add_title_tag=True, custom_tags=[]):
recipe = self.recipe_from_urn(urn)
self.scheduler_config.schedule_recipe(recipe, schedule_type, schedule,
add_title_tag=add_title_tag, custom_tags=custom_tags)
def update_last_downloaded(self, urn):
self.scheduler_config.update_last_downloaded(urn)
def set_account_info(self, urn, un, pw):
self.scheduler_config.set_account_info(urn, un, pw)
def clear_account_info(self, urn):
self.scheduler_config.clear_account_info(urn)
def get_account_info(self, urn):
return self.scheduler_config.get_account_info(urn)
def get_schedule_info(self, urn):
return self.scheduler_config.get_schedule_info(urn)
def un_schedule_recipe(self, urn):
    # Remove the recipe from the download schedule.
    self.scheduler_config.un_schedule_recipe(urn)
def schedule_recipe(self, urn, sched_type, schedule):
    # Resolve the urn to a recipe object before delegating to the scheduler.
    self.scheduler_config.schedule_recipe(self.recipe_from_urn(urn),
            sched_type, schedule)
def customize_recipe(self, urn, val):
    # Persist a customization value for the recipe.
    self.scheduler_config.customize_recipe(urn, val)
def get_to_be_downloaded_recipes(self):
    # Return the scheduled recipes that are due, dropping (and un-scheduling)
    # any whose recipe source can no longer be found.
    ans = self.scheduler_config.get_to_be_downloaded_recipes()
    ans2 = [x for x in ans if self.get_recipe(x, download=False) is not None]
    for x in set(ans) - set(ans2):
        self.un_schedule_recipe(x)
    return ans2
def scheduled_urns(self):
    # Collect the ids of all scheduled recipes under the scheduler lock.
    ans = []
    with self.scheduler_config.lock:
        for recipe in self.scheduler_config.iter_recipes():
            ans.append(recipe.get('id'))
    return ans
| 15,099 | Python | .py | 363 | 31.548209 | 114 | 0.605227 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,816 | break_iterator.py | kovidgoyal_calibre/src/calibre/spell/break_iterator.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from threading import Lock
from calibre.utils.icu import _icu
from calibre.utils.localization import lang_as_iso639_1
_iterators = {}
_sentence_iterators = {}
_lock = Lock()
def get_iterator(lang):
    '''Return the cached word BreakIterator for *lang*, creating it on first use.'''
    try:
        return _iterators[lang]
    except KeyError:
        breaker = _iterators[lang] = _icu.BreakIterator(_icu.UBRK_WORD, lang_as_iso639_1(lang) or lang)
        return breaker
def get_sentence_iterator(lang):
    '''Return the cached sentence BreakIterator for *lang*, creating it on first use.'''
    try:
        return _sentence_iterators[lang]
    except KeyError:
        breaker = _sentence_iterators[lang] = _icu.BreakIterator(_icu.UBRK_SENTENCE, lang_as_iso639_1(lang) or lang)
        return breaker
def split_into_words(text, lang='en'):
    '''Return the list of words in *text*, in order.'''
    with _lock:
        breaker = get_iterator(lang)
        breaker.set_text(text)
        return [text[start:start + length] for start, length in breaker.split2()]
def split_into_words_and_positions(text, lang='en'):
    '''Return (position, length) pairs for the words in *text*.'''
    with _lock:
        breaker = get_iterator(lang)
        breaker.set_text(text)
        return breaker.split2()
def sentence_positions(text, lang='en'):
    '''Return (position, length) pairs for the sentences in *text*.'''
    with _lock:
        breaker = get_sentence_iterator(lang)
        breaker.set_text(text)
        return breaker.split2()
def split_into_sentences(text, lang='en'):
    '''Return a tuple of the sentences in *text*, in order.'''
    with _lock:
        breaker = get_sentence_iterator(lang)
        breaker.set_text(text)
        return tuple(text[start:start + length] for start, length in breaker.split2())
def index_of(needle, haystack, lang='en'):
    '''Return the position of the word *needle* in *haystack*, as reported by
    the ICU break iterator.'''
    with _lock:
        breaker = get_iterator(lang)
        breaker.set_text(haystack)
        return breaker.index(needle)
def count_words(text, lang='en'):
    '''Return the number of words in *text*.'''
    with _lock:
        breaker = get_iterator(lang)
        breaker.set_text(text)
        return breaker.count_words()
def split_long_sentences(sentence: str, offset: int, lang: str = 'en', limit: int = 2048):
    '''Yield (absolute_offset, chunk) pairs for *sentence*, splitting it at word
    boundaries so chunks stay near *limit* characters.

    offset is the position of sentence in the containing text; yielded offsets
    are relative to that same text. Note that chunks are re-joined with single
    spaces, so inter-word characters of the original are not preserved.
    '''
    if len(sentence) <= limit:
        yield offset, sentence
        return
    buf, total, start_at = [], 0, 0

    def a(s, e):
        # Accumulate the word sentence[s:e] into the current chunk, recording
        # where the chunk starts when it is empty.
        nonlocal total, start_at
        t = sentence[s:e]
        if not buf:
            start_at = s
        buf.append(t)
        total += len(t)

    for start, length in split_into_words_and_positions(sentence, lang):
        a(start, start + length)
        if total >= limit:
            # Chunk is full: emit it and start a new one
            yield offset + start_at, ' '.join(buf)
            buf, total = [], 0
    if buf:
        # Emit the final, possibly short, chunk
        yield offset + start_at, ' '.join(buf)
PARAGRAPH_SEPARATOR = '\u2029'
def split_into_sentences_for_tts_embed(
    text: str, lang: str = 'en',
):
    '''Yield (position, length) sentence spans for TTS, after flattening
    newlines. The substitution preserves the total length of text (padding
    with spaces), so yielded positions remain valid offsets into the original
    text.'''
    import re

    def sub(m):
        # Replace a run of blank lines with a paragraph separator plus padding
        # spaces of the same total length.
        return PARAGRAPH_SEPARATOR + ' ' * (len(m.group()) - 1)

    text = re.sub(r'\n{2,}', sub, text.replace('\r', ' ')).replace('\n', ' ')
    yield from sentence_positions(text, lang)
def split_into_sentences_for_tts(
    text: str, lang: str = 'en', min_sentence_length: int = 32, max_sentence_length: int = 1024, PARAGRAPH_SEPARATOR: str = PARAGRAPH_SEPARATOR):
    '''Yield (position, sentence) pairs suitable for text-to-speech: short
    sentences are merged (unless a paragraph boundary intervenes) until they
    reach min_sentence_length, and long ones are split at word boundaries to
    stay under max_sentence_length.'''
    import re

    def sub(m):
        # Length-preserving replacement of blank-line runs, so positions keep
        # mapping into the original text.
        return PARAGRAPH_SEPARATOR + ' ' * (len(m.group()) - 1)

    text = re.sub(r'\n{2,}', sub, text.replace('\r', ' ')).replace('\n', ' ')
    pending_start, pending_sentence = 0, ''
    for start, length in sentence_positions(text, lang):
        end = start + length
        sentence = text[start:end].rstrip().replace('\n', ' ').strip()
        if not sentence:
            continue
        if len(sentence) < min_sentence_length and text[end-1] != PARAGRAPH_SEPARATOR:
            # Too short to speak on its own: accumulate into pending_sentence
            if pending_sentence:
                pending_sentence += ' ' + sentence
                if len(pending_sentence) >= min_sentence_length:
                    yield pending_start, pending_sentence
                    pending_start, pending_sentence = 0, ''
            else:
                pending_start, pending_sentence = start, sentence
            continue
        for start, sentence in split_long_sentences(sentence, start, lang, limit=max_sentence_length):
            # Prepend any accumulated short text to the first chunk
            if pending_sentence:
                sentence = pending_sentence + ' ' + sentence
                start = pending_start
                pending_start, pending_sentence = 0, ''
            yield start, sentence
    if pending_sentence:
        # Flush any trailing short text
        yield pending_start, pending_sentence
| 4,087 | Python | .py | 106 | 31 | 145 | 0.605063 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,817 | __init__.py | kovidgoyal_calibre/src/calibre/spell/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import namedtuple
from calibre.utils.localization import canonicalize_lang, load_iso3166
DictionaryLocale = namedtuple('DictionaryLocale', 'langcode countrycode')
def get_codes():
    '''Return the ISO 3166 country codes and the three-letter to two-letter
    code map, as a 2-tuple.'''
    iso_data = load_iso3166()
    return iso_data['codes'], iso_data['three_map']
def parse_lang_code(raw):
    '''Parse a language tag such as "en-US" or "sr_Cyrl_RS" into a
    DictionaryLocale(langcode, countrycode). Raises ValueError when the
    language part is unrecognized; countrycode is None when absent/unknown.'''
    raw = raw or ''
    subtags = raw.replace('_', '-').split('-')
    langcode = canonicalize_lang(subtags[0])
    if langcode is None:
        raise ValueError('Invalid language code: %r' % raw)
    # Script subtags carry no country information, drop them
    for script in ('Cyrl', 'Latn'):
        if script in subtags:
            subtags.remove(script)
    countrycode = None
    if len(subtags) > 1:
        ccodes, ccodemap = get_codes()
        candidate = subtags[1].upper()
        if candidate in ccodes:
            countrycode = candidate
        else:
            countrycode = ccodemap.get(candidate, None)
    return DictionaryLocale(langcode, countrycode)
| 916 | Python | .py | 27 | 28 | 73 | 0.622727 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,818 | import_from.py | kovidgoyal_calibre/src/calibre/spell/import_from.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import codecs
import glob
import os
import re
import sys
import tempfile
from lxml import etree
from calibre import browser
from calibre.constants import config_dir
from calibre.utils.resources import get_path as P
from calibre.utils.xml_parse import safe_xml_fromstring
from calibre.utils.zipfile import ZipFile
from polyglot.builtins import iteritems
NS_MAP = {
'oor': "http://openoffice.org/2001/registry",
'xs': "http://www.w3.org/2001/XMLSchema",
'manifest': 'http://openoffice.org/2001/manifest',
}
def XPath(x):
    # Compile an XPath expression with this module's namespace prefixes bound.
    return etree.XPath(x, namespaces=NS_MAP)
BUILTIN_LOCALES = {'en-US', 'en-GB', 'es-ES'}
ONLINE_DICTIONARY_BASE_URL = 'https://raw.githubusercontent.com/LibreOffice/dictionaries/master/'
def parse_xcu(raw, origin='%origin%'):
    '''Get the dictionary and affix file names as well as supported locales for
    each dictionary. Returns a map of (dic_path, aff_path) -> list of locale
    strings, with %origin% placeholders replaced by *origin*.'''
    ans = {}
    root = safe_xml_fromstring(raw)
    # Only entries whose Format property is DICT_SPELL are spelling dictionaries
    for node in XPath('//prop[@oor:name="Format"]/value[text()="DICT_SPELL"]/../..')(root):
        value = XPath('descendant::prop[@oor:name="Locations"]/value')(node)
        if len(value[0]) == 0:
            # The value node has no children, use its text
            paths = ''.join(XPath('descendant::prop[@oor:name="Locations"]/value/text()')(node)).replace('%origin%', origin).split()
        else:
            # Use the text of the value nodes children
            paths = [c.text.replace('%origin%', origin) for v in value for c in v.iterchildren('*') if c.text]
        # The two locations are the .aff and .dic files, in either order
        aff, dic = paths if paths[0].endswith('.aff') else reversed(paths)
        locales = ''.join(XPath('descendant::prop[@oor:name="Locales"]/value/text()')(node)).split()
        if not locales:
            # Locales may instead be listed as child <it> elements
            locales = [str(item) for item in XPath('descendant::prop[@oor:name="Locales"]/value/it/text()')(node)]
        ans[(dic, aff)] = locales
    return ans
def convert_to_utf8(dic_data, aff_data, errors='strict'):
    '''Re-encode a hunspell .dic/.aff pair to UTF-8, honoring the SET
    declaration near the top of the .aff file. The pair is returned unchanged
    when no declaration is found, when it already declares UTF-8, or when the
    declared encoding is unknown to Python.'''
    match = re.search(br'^SET\s+(\S+)$', aff_data[:2048], flags=re.MULTILINE)
    if match is None:
        return dic_data, aff_data
    declared = match.group(1).decode('ascii', 'replace')
    if declared.upper() in ('UTF-8', 'UTF8'):
        return dic_data, aff_data
    try:
        codecs.lookup(declared)
    except LookupError:
        # Unknown encoding: leave the data untouched
        return dic_data, aff_data
    # Rewrite the declaration, then transcode both files
    aff_data = aff_data[:match.start()] + b'SET UTF-8' + aff_data[match.end():]
    aff_data = aff_data.decode(declared, errors).encode('utf-8')
    dic_data = dic_data.decode(declared, errors).encode('utf-8')
    return dic_data, aff_data
def import_from_libreoffice_source_tree(source_path):
    '''Copy the dictionaries for BUILTIN_LOCALES from a LibreOffice
    "dictionaries" source checkout into calibre's bundled resources,
    converting them to UTF-8. Raises if any wanted locale cannot be found.'''
    dictionaries = {}
    for x in glob.glob(os.path.join(source_path, '*', 'dictionaries.xcu')):
        origin = os.path.dirname(x)
        with open(x, 'rb') as f:
            dictionaries.update(parse_xcu(f.read(), origin))
    base = P('dictionaries', allow_user_override=False)
    want_locales = set(BUILTIN_LOCALES)
    for (dic, aff), locales in iteritems(dictionaries):
        c = set(locales) & want_locales
        if c:
            locale = tuple(c)[0]
            want_locales.discard(locale)
            dest = os.path.join(base, locale)
            if not os.path.exists(dest):
                os.makedirs(dest)
            with open(dic, 'rb') as df, open(aff, 'rb') as af:
                dd, ad = convert_to_utf8(df.read(), af.read())
            for src, raw in ((dic, dd), (aff, ad)):
                # Name the copied files <locale>.dic / <locale>.aff
                with open(os.path.join(dest, locale + os.path.splitext(src)[1]), 'wb') as df:
                    df.write(raw)
            with open(os.path.join(dest, 'locales'), 'wb') as f:
                # The chosen locale goes first; the rest follow sorted
                locales.sort(key=lambda x: (0, x) if x == locale else (1, x))
                f.write(('\n'.join(locales)).encode('utf-8'))
    if want_locales:
        raise Exception('Failed to find dictionaries for some wanted locales: %s' % want_locales)
def fill_country_code(x):
    '''Expand bare language codes that are known to need a country suffix.'''
    special_cases = {'lt': 'lt_LT'}
    return special_cases.get(x, x)
def uniq(vals, kmap=lambda x: x):
    '''Remove all duplicates from vals, while preserving order. kmap must be a
    callable that returns a hashable value for every item in vals.'''
    seen = set()
    result = []
    for item in (vals or ()):
        key = kmap(item)
        if key not in seen:
            seen.add(key)
            result.append(item)
    return tuple(result)
def _import_from_virtual_directory(read_file_func, name, dest_dir=None, prefix='dic-'):
    '''Install all spelling dictionaries described by an OXT-style manifest.

    read_file_func(path) must return the bytes of *path* inside the virtual
    directory (a zip archive or remote tree). Returns the number of
    dictionaries installed into dest_dir (the user config area by default).
    '''
    from calibre.spell.dictionary import parse_lang_code
    dest_dir = dest_dir or os.path.join(config_dir, 'dictionaries')
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    num = 0
    # The manifest names the .xcu file that describes the dictionaries
    root = safe_xml_fromstring(read_file_func('META-INF/manifest.xml'))
    xcu = XPath('//manifest:file-entry[@manifest:media-type="application/vnd.sun.star.configuration-data"]')(root)[0].get(
        '{%s}full-path' % NS_MAP['manifest'])
    for (dic, aff), locales in iteritems(parse_xcu(read_file_func(xcu), origin='')):
        dic, aff = dic.lstrip('/'), aff.lstrip('/')
        d = tempfile.mkdtemp(prefix=prefix, dir=dest_dir)
        # Keep only locales with a country code, de-duplicated
        locales = uniq([x for x in map(fill_country_code, locales) if parse_lang_code(x).countrycode])
        if not locales:
            continue
        # First line of the locales file is the display name
        metadata = [name] + list(locales)
        with open(os.path.join(d, 'locales'), 'wb') as f:
            f.write(('\n'.join(metadata)).encode('utf-8'))
        dd, ad = convert_to_utf8(read_file_func(dic), read_file_func(aff))
        with open(os.path.join(d, '%s.dic' % locales[0]), 'wb') as f:
            f.write(dd)
        with open(os.path.join(d, '%s.aff' % locales[0]), 'wb') as f:
            f.write(ad)
        num += 1
    return num
def import_from_oxt(source_path, name, dest_dir=None, prefix='dic-'):
    '''Install the dictionaries from an .oxt (LibreOffice extension) file.
    Returns the number of dictionaries installed.'''
    with ZipFile(source_path) as zf:

        def read_file(key):
            try:
                return zf.open(key).read()
            except KeyError:
                # Some dictionaries apparently put the xcu in a sub-directory
                # and incorrectly make paths relative to that directory instead
                # of the root, for example:
                # http://extensions.libreoffice.org/extension-center/italian-dictionary-thesaurus-hyphenation-patterns/releases/4.1/dict-it.oxt
                while key.startswith('../'):
                    key = key[3:]
                return zf.open(key.lstrip('/')).read()

        return _import_from_virtual_directory(read_file, name, dest_dir=dest_dir, prefix=prefix)
def import_from_online(directory, name, dest_dir=None, prefix='dic-'):
    '''Download and install the dictionaries from the given sub-directory of
    the LibreOffice dictionaries repository on GitHub. Returns the number of
    dictionaries installed.'''
    br = browser(timeout=30)

    def read_file(key):
        try:
            rp = br.open('/'.join((ONLINE_DICTIONARY_BASE_URL, directory, key)))
            return rp.read()
        except Exception as err:
            if getattr(err, 'code', -1) != 404:
                raise
            # Some dictionaries apparently put the dic and aff file in a
            # sub-directory dictionaries and incorrectly make paths relative
            # to that directory instead of the root, for example:
            # https://github.com/LibreOffice/dictionaries/tree/master/ca
            rp = br.open('/'.join((ONLINE_DICTIONARY_BASE_URL, directory, 'dictionaries', key)))
            return rp.read()

    return _import_from_virtual_directory(read_file, name, dest_dir=dest_dir, prefix=prefix)
if __name__ == '__main__':
import_from_libreoffice_source_tree(sys.argv[-1])
| 7,432 | Python | .py | 150 | 41.006667 | 143 | 0.617497 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,819 | dictionary.py | kovidgoyal_calibre/src/calibre/spell/dictionary.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2014, Kovid Goyal <kovid at kovidgoyal.net>
import glob
import json
import os
import re
import shutil
import sys
from collections import defaultdict, namedtuple
from functools import partial
from itertools import chain
from calibre import prints
from calibre.constants import config_dir, filesystem_encoding, iswindows
from calibre.spell import parse_lang_code
from calibre.utils.config import JSONConfig
from calibre.utils.icu import capitalize
from calibre.utils.localization import _, get_lang, get_system_locale
from calibre.utils.resources import get_path as P
from polyglot.builtins import iteritems, itervalues
Dictionary = namedtuple('Dictionary', 'primary_locale locales dicpath affpath builtin name id')
LoadedDictionary = namedtuple('Dictionary', 'primary_locale locales obj builtin name id')
dprefs = JSONConfig('dictionaries/prefs.json')
dprefs.defaults['preferred_dictionaries'] = {}
dprefs.defaults['preferred_locales'] = {}
dprefs.defaults['user_dictionaries'] = [{'name':_('Default'), 'is_active':True, 'words':[]}]
not_present = object()
class UserDictionary:
    '''A named, user-editable word list. Words are stored as a set of
    (word, langcode) pairs; inactive dictionaries are kept but not consulted.'''

    __slots__ = ('name', 'is_active', 'words')

    def __init__(self, **kwargs):
        self.name = kwargs['name']
        self.is_active = kwargs['is_active']
        # Normalize the serialized word list into a set of 2-tuples
        self.words = {(word, lang) for word, lang in kwargs['words']}

    def serialize(self):
        '''Return a JSON-serializable dict representation of this dictionary.'''
        return {
            'name': self.name,
            'is_active': self.is_active,
            'words': list(self.words),
        }
_builtins = _custom = None
def builtin_dictionaries():
    '''Return the frozenset of Dictionary objects bundled with calibre,
    scanning the resources directory only once.'''
    global _builtins
    if _builtins is None:
        dics = []
        for lc in glob.glob(os.path.join(P('dictionaries', allow_user_override=False), '*/locales')):
            with open(lc, 'rb') as lcf:
                locales = list(filter(None, lcf.read().decode('utf-8').splitlines()))
            # First locale is the primary one and names the .dic/.aff files
            locale = locales[0]
            base = os.path.dirname(lc)
            dics.append(Dictionary(
                parse_lang_code(locale), frozenset(map(parse_lang_code, locales)), os.path.join(base, '%s.dic' % locale),
                os.path.join(base, '%s.aff' % locale), True, None, None))
        _builtins = frozenset(dics)
    return _builtins
def catalog_online_dictionaries():
    '''Return the catalog of dictionaries that can be downloaded, as a list of
    {'primary_locale', 'name', 'directory'} dicts, merging the bundled catalog
    with an optional user-override catalog.'''
    loaded = json.loads(P('dictionaries/online-catalog.json', allow_user_override=False, data=True))
    try:
        loaded.update(json.loads(P('dictionaries/online-catalog.json', data=True)))
    except Exception:
        # The user-override catalog is optional: ignore a missing or corrupt
        # one rather than breaking dictionary downloads. (Was a bare except,
        # which would also swallow KeyboardInterrupt/SystemExit.)
        pass
    rslt = []
    for lang, directory in loaded.items():
        rslt.append({'primary_locale':parse_lang_code(lang), 'name':lang,'directory':directory})
    return rslt
def custom_dictionaries(reread=False):
    '''Return the frozenset of user-installed Dictionary objects, cached after
    the first scan of the config directory unless *reread* is True.'''
    global _custom
    if _custom is None or reread:
        dics = []
        for lc in glob.glob(os.path.join(config_dir, 'dictionaries', '*/locales')):
            with open(lc, 'rb') as cdf:
                locales = list(filter(None, cdf.read().decode('utf-8').splitlines()))
            try:
                # First line is the display name; the rest are locales, with
                # the first locale naming the .dic/.aff files
                name, locale, locales = locales[0], locales[1], locales[1:]
            except IndexError:
                continue
            base = os.path.dirname(lc)
            ploc = parse_lang_code(locale)
            if ploc.countrycode is None:
                # Custom dictionaries must have a country code
                continue
            dics.append(Dictionary(
                ploc, frozenset(filter(lambda x:x.countrycode is not None, map(parse_lang_code, locales))), os.path.join(base, '%s.dic' % locale),
                os.path.join(base, '%s.aff' % locale), False, name, os.path.basename(base)))
        _custom = frozenset(dics)
    return _custom
# Pick a default English locale: prefer the system locale's English variant
# when it is one of the listed country codes, otherwise en-US.
default_en_locale = 'en-US'
try:
    ul = parse_lang_code(get_system_locale() or 'en-US')
except ValueError:
    ul = None
if ul is not None and ul.langcode == 'eng' and ul.countrycode in 'GB BS BZ GH IE IN JM NZ TT'.split():
    default_en_locale = 'en-' + ul.countrycode
# Fallback locale per language, used when the user has no explicit preference
default_preferred_locales = {'eng':default_en_locale, 'deu':'de-DE', 'spa':'es-ES', 'fra':'fr-FR'}
def best_locale_for_language(langcode):
    '''Return the preferred DictionaryLocale for a bare language code, from the
    user preferences or the built-in defaults; None when neither knows it.'''
    preferred = dprefs['preferred_locales'].get(langcode, default_preferred_locales.get(langcode, None))
    if preferred is not None:
        return parse_lang_code(preferred)
def preferred_dictionary(locale):
    '''Return the id of the user-preferred dictionary for *locale*, or None.'''
    by_locale = {parse_lang_code(k): v for k, v in iteritems(dprefs['preferred_dictionaries'])}
    return by_locale.get(locale, None)
def remove_dictionary(dictionary):
    '''Delete a user-installed dictionary from disk and drop any preference
    entries that referenced it. Builtin dictionaries cannot be removed.'''
    if dictionary.builtin:
        raise ValueError('Cannot remove builtin dictionaries')
    base = os.path.dirname(dictionary.dicpath)
    shutil.rmtree(base)
    dprefs['preferred_dictionaries'] = {k:v for k, v in iteritems(dprefs['preferred_dictionaries']) if v != dictionary.id}
def rename_dictionary(dictionary, name):
    '''Change the display name of a user-installed dictionary (stored on the
    first line of its locales file) and refresh the cache.'''
    lf = os.path.join(os.path.dirname(dictionary.dicpath), 'locales')
    with open(lf, 'r+b') as f:
        lines = f.read().splitlines()
        # Replace only the first line (the display name)
        lines[:1] = [name.encode('utf-8')]
        f.seek(0), f.truncate(), f.write(b'\n'.join(lines))
    custom_dictionaries(reread=True)
def get_dictionary(locale, exact_match=False):
    '''Return the best Dictionary for *locale*, or None.

    Preference order: the user's preferred dictionary for the exact locale,
    then other exact matches (user-installed before builtin), then — unless
    exact_match is True — a language-only match.
    '''
    preferred = preferred_dictionary(locale)
    # First find all dictionaries that match locale exactly
    exact_matches = {}
    for collection in (custom_dictionaries(), builtin_dictionaries()):
        for d in collection:
            if d.primary_locale == locale:
                exact_matches[d.id] = d
        for d in collection:
            for q in d.locales:
                if q == locale and d.id not in exact_matches:
                    exact_matches[d.id] = d

    # If the user has specified a preferred dictionary for this locale, use it,
    # otherwise, if a builtin dictionary exists, use that
    if preferred in exact_matches:
        return exact_matches[preferred]

    # Return one of the exactly matching dictionaries, preferring user
    # installed to builtin ones (builtin dictionaries have id None)
    for k in sorted(exact_matches, key=lambda x: (1, None) if x is None else (0, x)):
        return exact_matches[k]

    if exact_match:
        return

    # No dictionary matched the locale exactly, we will now fallback to
    # matching only on language. First see if a dictionary matching the
    # preferred locale for the language exists.
    best_locale = best_locale_for_language(locale.langcode)
    if best_locale is not None:
        ans = get_dictionary(best_locale, exact_match=True)
        if ans is not None:
            return ans

    # Now just return any dictionary that matches the language, preferring user
    # installed ones to builtin ones
    for collection in (custom_dictionaries(), builtin_dictionaries()):
        for d in sorted(collection, key=lambda d: d.name or ''):
            if d.primary_locale.langcode == locale.langcode:
                return d
def load_dictionary(dictionary):
    '''Load the hunspell data for a Dictionary and return a LoadedDictionary
    whose .obj is the live hunspell.Dictionary instance.'''
    from calibre_extensions import hunspell

    def fix_path(path):
        # hunspell wants absolute unicode paths; on Windows use the \\?\ form
        if isinstance(path, bytes):
            path = path.decode(filesystem_encoding)
        path = os.path.abspath(path)
        if iswindows:
            path = fr'\\?\{path}'
        return path

    obj = hunspell.Dictionary(fix_path(dictionary.dicpath), fix_path(dictionary.affpath))
    return LoadedDictionary(dictionary.primary_locale, dictionary.locales, obj, dictionary.builtin, dictionary.name, dictionary.id)
class Dictionaries:
    '''Manage spell checking: per-locale hunspell dictionaries, user word
    lists, per-session ignored words and a cache of recognition results.

    NOTE(review): no internal locking — appears intended for single-threaded
    (GUI) use; confirm with callers before sharing between threads.
    '''

    def __init__(self):
        # Runs of ASCII hyphen / unicode hyphen (U+2010)
        self.remove_hyphenation = re.compile('[\u2010-]+')
        # Leading '-' followed by '.', a digit or '+' (e.g. negative numbers)
        self.negative_pat = re.compile(r'-[.\d+]')
        self.fix_punctuation_pat = re.compile(r'''[:.]''')
        self.dictionaries = {}  # locale -> LoadedDictionary or None
        self.word_cache = {}  # (word, locale) -> bool
        self.ignored_words = set()  # (word, langcode) pairs
        self.added_user_words = {}
        try:
            self.default_locale = parse_lang_code(get_lang())
        except ValueError:
            self.default_locale = parse_lang_code('en-US')
        self.ui_locale = self.default_locale

    def initialize(self, force=False):
        # Load the user dictionaries once (or again when force is True)
        if force or not hasattr(self, 'active_user_dictionaries'):
            self.read_user_dictionaries()

    def clear_caches(self):
        self.dictionaries.clear(), self.word_cache.clear()

    def clear_ignored(self):
        self.ignored_words.clear()

    def dictionary_for_locale(self, locale):
        # Return (and cache) the LoadedDictionary for locale, with all active
        # user words for that language added to it. May be None.
        ans = self.dictionaries.get(locale, not_present)
        if ans is not_present:
            ans = get_dictionary(locale)
            if ans is not None:
                ans = load_dictionary(ans)
                for ud in self.active_user_dictionaries:
                    for word, langcode in ud.words:
                        if langcode == locale.langcode:
                            try:
                                ans.obj.add(word)
                            except Exception:
                                # not critical since all it means is that the word won't show up in suggestions
                                prints(f'Failed to add the word {word!r} to the dictionary for {locale}', file=sys.stderr)
            self.dictionaries[locale] = ans
        return ans

    def ignore_word(self, word, locale):
        # Ignoring is per-language (langcode), not per-country
        self.ignored_words.add((word, locale.langcode))
        self.word_cache[(word, locale)] = True

    def unignore_word(self, word, locale):
        self.ignored_words.discard((word, locale.langcode))
        self.word_cache.pop((word, locale), None)

    def is_word_ignored(self, word, locale):
        return (word, locale.langcode) in self.ignored_words

    @property
    def all_user_dictionaries(self):
        # Active first, then inactive
        return chain(self.active_user_dictionaries, self.inactive_user_dictionaries)

    def user_dictionary(self, name):
        # Return the user dictionary with this name, or None
        for ud in self.all_user_dictionaries:
            if ud.name == name:
                return ud

    def read_user_dictionaries(self):
        self.active_user_dictionaries = []
        self.inactive_user_dictionaries = []
        for d in dprefs['user_dictionaries'] or dprefs.defaults['user_dictionaries']:
            d = UserDictionary(**d)
            (self.active_user_dictionaries if d.is_active else self.inactive_user_dictionaries).append(d)

    def mark_user_dictionary_as_active(self, name, is_active=True):
        # Returns True if a dictionary with that name existed
        d = self.user_dictionary(name)
        if d is not None:
            d.is_active = is_active
            self.save_user_dictionaries()
            return True
        return False

    def save_user_dictionaries(self):
        dprefs['user_dictionaries'] = [d.serialize() for d in self.all_user_dictionaries]

    def add_user_words(self, words, langcode):
        # Push the words into every already-loaded dictionary for langcode
        for d in itervalues(self.dictionaries):
            if d and getattr(d.primary_locale, 'langcode', None) == langcode:
                for word in words:
                    d.obj.add(word)

    def remove_user_words(self, words, langcode):
        for d in itervalues(self.dictionaries):
            if d and d.primary_locale.langcode == langcode:
                for word in words:
                    d.obj.remove(word)

    def add_to_user_dictionary(self, name, word, locale):
        # word may be a single word or a set of (word, langcode) pairs.
        # Returns True if the dictionary actually grew.
        ud = self.user_dictionary(name)
        if ud is None:
            raise ValueError('Cannot add to the dictionary named: %s as no such dictionary exists' % name)
        wl = len(ud.words)
        if isinstance(word, (set, frozenset)):
            ud.words |= word
            self.add_user_words({x[0] for x in word}, locale.langcode)
        else:
            ud.words.add((word, locale.langcode))
            self.add_user_words((word,), locale.langcode)
        if len(ud.words) > wl:
            self.save_user_dictionaries()
            try:
                self.word_cache.pop((word, locale), None)
            except TypeError:
                pass  # word is a set, ignore
            return True
        return False

    def remove_from_user_dictionaries(self, word, locale):
        # Remove the word from every active user dictionary
        key = (word, locale.langcode)
        changed = False
        for ud in self.active_user_dictionaries:
            if key in ud.words:
                changed = True
                ud.words.discard(key)
        if changed:
            self.word_cache.pop((word, locale), None)
            self.save_user_dictionaries()
            self.remove_user_words((word,), locale.langcode)
        return changed

    def remove_from_user_dictionary(self, name, words):
        # Remove (word, locale) pairs from the named dictionary only
        changed = False
        removals = defaultdict(set)
        keys = [(w, l.langcode) for w, l in words]
        for d in self.all_user_dictionaries:
            if d.name == name:
                for key in keys:
                    if key in d.words:
                        d.words.discard(key)
                        removals[key[1]].add(key[0])
                        changed = True
        if changed:
            for key in words:
                self.word_cache.pop(key, None)
            for langcode, words in iteritems(removals):
                self.remove_user_words(words, langcode)
            self.save_user_dictionaries()
        return changed

    def word_in_user_dictionary(self, word, locale):
        # Return the name of the first active user dictionary containing word
        key = (word, locale.langcode)
        for ud in self.active_user_dictionaries:
            if key in ud.words:
                return ud.name

    def create_user_dictionary(self, name):
        if name in {d.name for d in self.all_user_dictionaries}:
            raise ValueError('A dictionary named %s already exists' % name)
        d = UserDictionary(name=name, is_active=True, words=())
        self.active_user_dictionaries.append(d)
        self.save_user_dictionaries()

    def remove_user_dictionary(self, name):
        changed = False
        for x in (self.active_user_dictionaries, self.inactive_user_dictionaries):
            for d in tuple(x):
                if d.name == name:
                    x.remove(d)
                    changed = True
        if changed:
            self.save_user_dictionaries()
            self.clear_caches()
        return changed

    def rename_user_dictionary(self, name, new_name):
        changed = False
        for d in self.all_user_dictionaries:
            if d.name == name:
                d.name = new_name
                changed = True
        if changed:
            self.save_user_dictionaries()
        return changed

    def recognized(self, word, locale=None):
        # Return True if word is a correctly spelled word for locale, checking
        # (in order): the result cache, the ignored set, active user
        # dictionaries, and finally hunspell. A missing dictionary means
        # everything is "recognized".
        locale = locale or self.default_locale
        key = (word, locale)
        ans = self.word_cache.get(key, None)
        if ans is None:
            lkey = (word, locale.langcode)
            ans = False
            if lkey in self.ignored_words:
                ans = True
            else:
                for ud in self.active_user_dictionaries:
                    if lkey in ud.words:
                        ans = True
                        break
                else:
                    d = self.dictionary_for_locale(locale)
                    if d is not None:
                        try:
                            ans = d.obj.recognized(word.replace('\u2010', '-'))
                        except ValueError:
                            pass
                    else:
                        # No dictionary available: do not mark anything wrong
                        ans = True
                    if ans is False and self.negative_pat.match(word) is not None:
                        # Words like -1 or -.5 are numbers, accept them
                        ans = True
            self.word_cache[key] = ans
        return ans

    def suggestions(self, word, locale=None):
        # Return a tuple of spelling suggestions for word, with extra
        # candidates for de-hyphenation and missing spaces after ':' or '.'
        locale = locale or self.default_locale
        d = self.dictionary_for_locale(locale)
        has_unicode_hyphen = '\u2010' in word
        ans = ()

        def add_suggestion(w, ans):
            # Put w first, dropping any duplicate of it later in ans
            return (w,) + tuple(x for x in ans if x != w)

        if d is not None:
            try:
                ans = d.obj.suggest(str(word).replace('\u2010', '-'))
            except ValueError:
                pass
            else:
                dehyphenated_word = self.remove_hyphenation.sub('', word)
                if len(dehyphenated_word) != len(word) and self.recognized(dehyphenated_word, locale):
                    # Ensure the de-hyphenated word is present and is the first suggestion
                    ans = add_suggestion(dehyphenated_word, ans)
                else:
                    m = self.fix_punctuation_pat.search(word)
                    if m is not None:
                        # Try splitting at the punctuation mark
                        w1, w2 = word[:m.start()], word[m.end():]
                        if self.recognized(w1) and self.recognized(w2):
                            fw = w1 + m.group() + ' ' + w2
                            ans = add_suggestion(fw, ans)
                            if capitalize(w2) != w2:
                                fw = w1 + m.group() + ' ' + capitalize(w2)
                                ans = add_suggestion(fw, ans)
        if has_unicode_hyphen:
            # Restore the unicode hyphens the caller used
            ans = tuple(w.replace('-', '\u2010') for w in ans)
        return ans
def build_test():
    # Quick sanity check used by the calibre build system
    dictionaries = Dictionaries()
    dictionaries.initialize()
    eng = parse_lang_code('en')
    if not dictionaries.recognized('recognized', locale=eng):
        raise AssertionError('The word recognized was not recognized')
def find_tests():
    '''Return the unittest suite for the spell-checking machinery.'''
    import unittest

    class TestDictionaries(unittest.TestCase):

        def setUp(self):
            dictionaries = Dictionaries()
            dictionaries.initialize()
            eng = parse_lang_code('en-GB')
            self.recognized = partial(dictionaries.recognized, locale=eng)
            self.suggestions = partial(dictionaries.suggestions, locale=eng)

        def ar(self, w):
            # Assert that w is recognized
            if not self.recognized(w):
                raise AssertionError('The word %r was not recognized' % w)

        def test_dictionaries(self):
            for w in 'recognized one-half one\u2010half'.split():
                self.ar(w)
            d = load_dictionary(get_dictionary(parse_lang_code('es-ES'))).obj
            self.assertTrue(d.recognized('Ahí'))
            self.assertIn('one\u2010half', self.suggestions('oone\u2010half'))
            d = load_dictionary(get_dictionary(parse_lang_code('es'))).obj
            self.assertIn('adequately', self.suggestions('ade-quately'))
            self.assertIn('magic. Wand', self.suggestions('magic.wand'))
            self.assertIn('List', self.suggestions('Lisùëòt'))

    return unittest.TestLoader().loadTestsFromTestCase(TestDictionaries)
def test():
    # Run the spell-checker tests from the command line
    from calibre.utils.run_tests import run_cli
    run_cli(find_tests())
if __name__ == '__main__':
test()
| 18,220 | Python | .py | 402 | 34.803483 | 146 | 0.604318 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,820 | errors.py | kovidgoyal_calibre/src/css_selectors/errors.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
# Base class so callers can catch all selector failures with one except clause
class SelectorError(ValueError):
    """Common parent for SelectorSyntaxError and ExpressionError"""
# Raised by the tokenizer/parser for malformed selector text
class SelectorSyntaxError(SelectorError):
    """Parsing a selector that does not match the grammar."""
# Raised when a selector parses but cannot be evaluated by this engine
class ExpressionError(SelectorError):
    """Unknown or unsupported selector (eg. pseudo-class)."""
| 445 | Python | .py | 10 | 41.5 | 67 | 0.754098 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,821 | select.py | kovidgoyal_calibre/src/css_selectors/select.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import itertools
import re
from collections import OrderedDict, defaultdict
from functools import wraps
from itertools import chain
from lxml import etree
from css_selectors.errors import ExpressionError
from css_selectors.ordered_set import OrderedSet
from css_selectors.parser import Element, FunctionalPseudoElement, ascii_lower, parse
from polyglot.builtins import iteritems, itervalues
PARSE_CACHE_SIZE = 200
parse_cache = OrderedDict()
XPATH_CACHE_SIZE = 30
xpath_cache = OrderedDict()
# Test that the string is not empty and does not contain whitespace
is_non_whitespace = re.compile(r'^[^ \t\r\n\f]+$').match
def get_parsed_selector(raw):
    # Memoized wrapper around parse(). Eviction is FIFO (the oldest inserted
    # entry is dropped), not true LRU — adequate for a small bounded cache.
    try:
        return parse_cache[raw]
    except KeyError:
        parse_cache[raw] = ans = parse(raw)
        if len(parse_cache) > PARSE_CACHE_SIZE:
            parse_cache.pop(next(iter(parse_cache)))
        return ans
def get_compiled_xpath(expr):
    # Memoized wrapper around etree.XPath compilation, with FIFO eviction
    # (oldest inserted entry is dropped when the cache is full).
    try:
        return xpath_cache[expr]
    except KeyError:
        xpath_cache[expr] = ans = etree.XPath(expr)
        if len(xpath_cache) > XPATH_CACHE_SIZE:
            xpath_cache.pop(next(iter(xpath_cache)))
        return ans
class AlwaysIn:
    '''A pseudo-container that reports every object as a member.'''

    def __contains__(self, item):
        return True
always_in = AlwaysIn()
def trace_wrapper(func):
    '''Decorator that prints every call (function name and arguments) before
    delegating; applied to the dispatch map when Select(trace=True).'''
    @wraps(func)
    def trace(*args, **kwargs):
        # Drop the Select instance from the printed args for readability
        targs = args[1:] if args and isinstance(args[0], Select) else args
        print('Called:', func.__name__, 'with args:', targs, kwargs or '')
        return func(*args, **kwargs)
    return trace
def normalize_language_tag(tag):
    """Return the set of normalized subtag combinations for a `BCP 47` language tag.

    For example 'de_AT-1901' yields {'de', 'de-at', 'de-1901', 'de-at-1901'}.
    (Note: the return value is a set, so iteration order is unspecified.)
    """
    # normalize:
    tag = ascii_lower(tag).replace('_','-')
    # split (except singletons, which mark the following tag as non-standard):
    tag = re.sub(r'-([a-zA-Z0-9])-', r'-\1_', tag)
    subtags = [subtag.replace('_', '-') for subtag in tag.split('-')]
    base_tag = (subtags.pop(0),)
    taglist = {base_tag[0]}
    # find all combinations of subtags
    for n in range(len(subtags), 0, -1):
        for tags in itertools.combinations(subtags, n):
            taglist.add('-'.join(base_tag + tags))
    return taglist
INAPPROPRIATE_PSEUDO_CLASSES = frozenset((
'active', 'after', 'disabled', 'visited', 'link', 'before', 'focus', 'first-letter', 'enabled', 'first-line', 'hover', 'checked', 'target'))
class Select:
'''
This class implements CSS Level 3 selectors
(http://www.w3.org/TR/css3-selectors) on an lxml tree, with caching for
performance. To use:
>>> from css_selectors import Select
>>> select = Select(root) # Where root is an lxml document
>>> print(tuple(select('p.myclass')))
Tags are returned in document order. Note that attribute and tag names are
matched case-insensitively. Class and id values are also matched
case-insensitively. Also namespaces are ignored (this is for performance of
the common case). The UI related selectors are not implemented, such as
:enabled, :disabled, :checked, :hover, etc. Similarly, the non-element
related selectors such as ::first-line, ::first-letter, ::before, etc. are
not implemented.
WARNING: This class uses internal caches. You *must not* make any changes
to the lxml tree. If you do make some changes, either create a new Select
object or call :meth:`invalidate_caches`.
This class can be easily sub-classed to work with tree implementations
other than lxml. Simply override the methods in the ``Tree Integration``
block below.
The caching works by maintaining internal maps from classes/ids/tag
names/etc. to node sets. These caches are populated as needed, and used
for all subsequent selections. Thus, for best performance you should use
the same selector object for finding the matching nodes for multiple
queries. Of course, remember not to change the tree in between queries.
'''
combinator_mapping = {
' ': 'descendant',
'>': 'child',
'+': 'direct_adjacent',
'~': 'indirect_adjacent',
}
attribute_operator_mapping = {
'exists': 'exists',
'=': 'equals',
'~=': 'includes',
'|=': 'dashmatch',
'^=': 'prefixmatch',
'$=': 'suffixmatch',
'*=': 'substringmatch',
}
def __init__(self, root, default_lang=None, ignore_inappropriate_pseudo_classes=False, dispatch_map=None, trace=False):
if hasattr(root, 'getroot'):
root = root.getroot()
self.root = root
self.dispatch_map = dispatch_map or default_dispatch_map
self.invalidate_caches()
self.default_lang = default_lang
if trace:
self.dispatch_map = {k:trace_wrapper(v) for k, v in iteritems(self.dispatch_map)}
if ignore_inappropriate_pseudo_classes:
self.ignore_inappropriate_pseudo_classes = INAPPROPRIATE_PSEUDO_CLASSES
else:
self.ignore_inappropriate_pseudo_classes = frozenset()
    # External API {{{
    def invalidate_caches(self):
        'Invalidate all caches. You must call this before using this object if you have made changes to the HTML tree'
        self._element_map = None
        self._id_map = None
        self._class_map = None
        self._attrib_map = None
        self._attrib_space_map = None
        self._lang_map = None
        # Tag names are matched case-insensitively; when the root tag uses
        # Clark notation ({namespace-uri}tag) the namespace part is stripped
        # before lowercasing, so namespaces are ignored throughout.
        self.map_tag_name = ascii_lower
        if '{' in self.root.tag:
            def map_tag_name(x):
                return ascii_lower(x.rpartition('}')[2])
            self.map_tag_name = map_tag_name
    def __call__(self, selector, root=None):
        ''' Return an iterator over all matching tags, in document order.
        Normally, all matching tags in the document are returned, if you
        specify root, then only tags that are root or descendants of root are
        returned. Note that this can be very expensive if root has a lot of
        descendants. '''
        seen = set()
        if root is not None:
            root = frozenset(self.itertag(root))
        for parsed_selector in get_parsed_selector(selector):
            for item in self.iterparsedselector(parsed_selector):
                # seen de-duplicates: a tag can match more than one of the
                # comma-separated selectors in the group
                if item not in seen and (root is None or item in root):
                    yield item
                    seen.add(item)
def has_matches(self, selector, root=None):
'Return True iff selector matches at least one item in the tree'
for elem in self(selector, root=root):
return True
return False
# }}}
def iterparsedselector(self, parsed_selector):
type_name = type(parsed_selector).__name__
try:
func = self.dispatch_map[ascii_lower(type_name)]
except KeyError:
raise ExpressionError('%s is not supported' % type_name)
for item in func(self, parsed_selector):
yield item
@property
def element_map(self):
if self._element_map is None:
self._element_map = em = defaultdict(OrderedSet)
for tag in self.itertag():
em[self.map_tag_name(tag.tag)].add(tag)
return self._element_map
@property
def id_map(self):
if self._id_map is None:
self._id_map = im = defaultdict(OrderedSet)
lower = ascii_lower
for elem in self.iteridtags():
im[lower(elem.get('id'))].add(elem)
return self._id_map
@property
def class_map(self):
if self._class_map is None:
self._class_map = cm = defaultdict(OrderedSet)
lower = ascii_lower
for elem in self.iterclasstags():
for cls in elem.get('class').split():
cm[lower(cls)].add(elem)
return self._class_map
    @property
    def attrib_map(self):
        # Lazily built two-level map: attribute name -> exact attribute
        # value -> set of tags with that value. Used by the =, |=, ^=, $=
        # and *= attribute operators.
        if self._attrib_map is None:
            self._attrib_map = am = defaultdict(lambda : defaultdict(OrderedSet))
            map_attrib_name = ascii_lower
            if '{' in self.root.tag:
                # Namespaced document (detected from the root tag): strip
                # Clark-notation namespaces from attribute names as well
                def map_attrib_name(x):
                    return ascii_lower(x.rpartition('}')[2])
            for tag in self.itertag():
                for attr, val in iteritems(tag.attrib):
                    am[map_attrib_name(attr)][val].add(tag)
        return self._attrib_map
    @property
    def attrib_space_map(self):
        # Like attrib_map, but each attribute value is split on whitespace
        # and every resulting word is indexed separately. Used by the ~=
        # (includes) operator.
        if self._attrib_space_map is None:
            self._attrib_space_map = am = defaultdict(lambda : defaultdict(OrderedSet))
            map_attrib_name = ascii_lower
            if '{' in self.root.tag:
                # Namespaced document: strip Clark-notation namespaces
                def map_attrib_name(x):
                    return ascii_lower(x.rpartition('}')[2])
            for tag in self.itertag():
                for attr, val in iteritems(tag.attrib):
                    for v in val.split():
                        am[map_attrib_name(attr)][v].add(tag)
        return self._attrib_space_map
    @property
    def lang_map(self):
        # Lazily built map from normalized language tag to the set of tags
        # in that language, used by :lang(). Every tag starts out in the
        # default language (if one was given); a lang/xml:lang attribute
        # overrides the language for that tag and all its descendants.
        if self._lang_map is None:
            self._lang_map = lm = defaultdict(OrderedSet)
            dl = normalize_language_tag(self.default_lang) if self.default_lang else None
            lmap = {tag:dl for tag in self.itertag()} if dl else {}
            for tag in self.itertag():
                lang = None
                # NOTE: if a tag has both xml:lang and lang, the plain lang
                # attribute is processed second, so its value wins.
                for attr in ('{http://www.w3.org/XML/1998/namespace}lang', 'lang'):
                    lang = tag.get(attr)
                    if lang:
                        lang = normalize_language_tag(lang)
                        for dtag in self.itertag(tag):
                            lmap[dtag] = lang
            # The normalized value is iterated below, so a single tag can be
            # registered under several equivalent language keys.
            for tag, langs in iteritems(lmap):
                for lang in langs:
                    lm[lang].add(tag)
        return self._lang_map
    # Tree Integration {{{
    # These thin wrappers centralize all tree traversal; a subclass can
    # support a tree implementation other than lxml by overriding them.
    def itertag(self, tag=None):
        # tag and all elements below it; '*' restricts to true elements
        # (skipping comments and processing instructions)
        return (self.root if tag is None else tag).iter('*')
    def iterdescendants(self, tag=None):
        # Like itertag(), but excludes the starting element itself
        return (self.root if tag is None else tag).iterdescendants('*')
    def iterchildren(self, tag=None):
        # Direct element children only
        return (self.root if tag is None else tag).iterchildren('*')
    def itersiblings(self, tag=None, preceding=False):
        # Following (or, with preceding=True, preceding) sibling elements
        return (self.root if tag is None else tag).itersiblings('*', preceding=preceding)
    def iteridtags(self):
        # All elements that have an id attribute
        return get_compiled_xpath('//*[@id]')(self.root)
    def iterclasstags(self):
        # All elements that have a class attribute
        return get_compiled_xpath('//*[@class]')(self.root)
def sibling_count(self, child, before=True, same_type=False):
' Return the number of siblings before or after child or raise ValueError if child has no parent. '
parent = child.getparent()
if parent is None:
raise ValueError('Child has no parent')
if same_type:
siblings = OrderedSet(child.itersiblings(preceding=before))
return len(self.element_map[self.map_tag_name(child.tag)] & siblings)
else:
if before:
return parent.index(child)
return len(parent) - parent.index(child) - 1
def all_sibling_count(self, child, same_type=False):
' Return the number of siblings of child or raise ValueError if child has no parent '
parent = child.getparent()
if parent is None:
raise ValueError('Child has no parent')
if same_type:
siblings = OrderedSet(chain(child.itersiblings(preceding=False), child.itersiblings(preceding=True)))
return len(self.element_map[self.map_tag_name(child.tag)] & siblings)
else:
return len(parent) - 1
def is_empty(self, elem):
' Return True iff elem has no child tags and no text content '
for child in elem:
# Check for comment/PI nodes with tail text
if child.tail:
return False
return len(tuple(elem.iterchildren('*'))) == 0 and not elem.text
# }}}
# Combinators {{{
def select_combinedselector(cache, combined):
    """Translate a combined selector such as "div > p".
    cache is the Select instance; the matched combinator handler receives the
    elements matching the left side and (optionally) the right side."""
    combinator = cache.combinator_mapping[combined.combinator]
    # Fast path for when the sub-selector is all elements
    right = None if isinstance(combined.subselector, Element) and (
        combined.subselector.element or '*') == '*' else cache.iterparsedselector(combined.subselector)
    for item in cache.dispatch_map[combinator](cache, cache.iterparsedselector(combined.selector), right):
        yield item
def select_descendant(cache, left, right):
    """right is a child, grand-child or further descendant of left"""
    # right is None on the fast path (sub-selector matches everything)
    candidates = always_in if right is None else frozenset(right)
    for ancestor in left:
        yield from (node for node in cache.iterdescendants(ancestor) if node in candidates)
def select_child(cache, left, right):
    """right is an immediate child of left"""
    candidates = always_in if right is None else frozenset(right)
    for parent in left:
        yield from (child for child in cache.iterchildren(parent) if child in candidates)
def select_direct_adjacent(cache, left, right):
    """right is a sibling immediately after left"""
    candidates = always_in if right is None else frozenset(right)
    for parent in left:
        # Only the immediately following sibling is a candidate for +
        nxt = next(iter(cache.itersiblings(parent)), None)
        if nxt is not None and nxt in candidates:
            yield nxt
def select_indirect_adjacent(cache, left, right):
    """right is a sibling after left, immediately or not"""
    candidates = always_in if right is None else frozenset(right)
    for parent in left:
        yield from (sib for sib in cache.itersiblings(parent) if sib in candidates)
# }}}
def select_element(cache, selector):
    """A type or universal selector."""
    name = selector.element
    if name and name != '*':
        # Specific tag name: use the cached tag-name index
        yield from cache.element_map[ascii_lower(name)]
    else:
        # Universal selector: every element in the tree
        yield from cache.itertag()
def select_hash(cache, selector):
    'An id selector (#some-id): elements matching the rest of the selector that carry the id'
    items = cache.id_map[ascii_lower(selector.id)]
    # Truthiness check, consistent with select_class() below (was len(items) > 0)
    if items:
        for elem in cache.iterparsedselector(selector.selector):
            if elem in items:
                yield elem
def select_class(cache, selector):
    'A class selector (.some-class)'
    matching = cache.class_map[ascii_lower(selector.class_name)]
    if matching:
        yield from (elem for elem in cache.iterparsedselector(selector.selector) if elem in matching)
def select_negation(cache, selector):
    'Implement :not()'
    # Materialize the excluded set first, then filter the main selector
    excluded = frozenset(cache.iterparsedselector(selector.subselector))
    yield from (item for item in cache.iterparsedselector(selector.selector) if item not in excluded)
# Attribute selectors {{{
def select_attrib(cache, selector):
    # Dispatch [attr], [attr=value], [attr~=value], etc. to the handler for
    # the operator, then filter the elements matched by the rest of the
    # selector against that set. Attribute names are lower-cased, so they
    # are matched case-insensitively.
    operator = cache.attribute_operator_mapping[selector.operator]
    items = frozenset(cache.dispatch_map[operator](cache, ascii_lower(selector.attrib), selector.value))
    for item in cache.iterparsedselector(selector.selector):
        if item in items:
            yield item
def select_exists(cache, attrib, value=None):
    'Elements that have the attribute at all, regardless of its value'
    for elem_set in cache.attrib_map[attrib].values():
        yield from elem_set
def select_equals(cache, attrib, value):
    'Elements whose attribute value is exactly value (the = operator)'
    yield from cache.attrib_map[attrib][value]
def select_includes(cache, attrib, value):
    'Elements whose whitespace-separated attribute values contain value (the ~= operator)'
    # Values that are empty or contain whitespace can never match
    if is_non_whitespace(value):
        yield from cache.attrib_space_map[attrib][value]
def select_dashmatch(cache, attrib, value):
    'Elements whose attribute equals value or starts with "value-" (the |= operator)'
    if value:
        prefix = value + '-'
        for val, elem_set in cache.attrib_map[attrib].items():
            if val == value or val.startswith(prefix):
                yield from elem_set
def select_prefixmatch(cache, attrib, value):
    'Elements whose attribute value starts with value (the ^= operator)'
    if value:
        for val, elem_set in cache.attrib_map[attrib].items():
            if val.startswith(value):
                yield from elem_set
def select_suffixmatch(cache, attrib, value):
    'Elements whose attribute value ends with value (the $= operator)'
    if value:
        for val, elem_set in cache.attrib_map[attrib].items():
            if val.endswith(value):
                yield from elem_set
def select_substringmatch(cache, attrib, value):
    'Elements whose attribute value contains value anywhere (the *= operator)'
    if value:
        for val, elem_set in cache.attrib_map[attrib].items():
            if value in val:
                yield from elem_set
# }}}
# Function selectors {{{
def select_function(cache, function):
    """Select with a functional pseudo-class (:lang(), :nth-child(), ...)."""
    fname = function.name.replace('-', '_')
    try:
        func = cache.dispatch_map[fname]
    except KeyError:
        raise ExpressionError(
            "The pseudo-class :%s() is unknown" % function.name)
    if fname == 'lang':
        # select_lang() yields matching elements itself, so intersect
        if fname == 'lang' else None  # (placeholder removed)
        items = frozenset(func(cache, function))
        for item in cache.iterparsedselector(function.selector):
            if item in items:
                yield item
    else:
        # The other handlers are per-element predicates
        for item in cache.iterparsedselector(function.selector):
            if func(cache, function, item):
                yield item
def select_lang(cache, function):
    ' Implement :lang() '
    if function.argument_types() not in (['STRING'], ['IDENT']):
        raise ExpressionError("Expected a single string or ident for :lang(), got %r" % function.arguments)
    lang = function.arguments[0].value
    if lang:
        lang = ascii_lower(lang)
        lp = lang + '-'
        # Match the exact language as well as any of its sub-languages,
        # e.g. :lang(en) also matches lang="en-us"
        for tlang, elem_set in iteritems(cache.lang_map):
            if tlang == lang or (tlang is not None and tlang.startswith(lp)):
                for elem in elem_set:
                    yield elem
def select_nth_child(cache, function, elem):
    ' Implement :nth-child() '
    a, b = function.parsed_arguments
    try:
        position = cache.sibling_count(elem) + 1  # 1-based position
    except ValueError:
        return False  # no parent, so no position among siblings
    if a == 0:
        return position == b
    # position must equal a*n + b for some non-negative integer n
    n = (position - b) / a
    return n.is_integer() and n > -1
def select_nth_last_child(cache, function, elem):
    ' Implement :nth-last-child() '
    a, b = function.parsed_arguments
    try:
        position = cache.sibling_count(elem, before=False) + 1  # 1-based, from the end
    except ValueError:
        return False
    if a == 0:
        return position == b
    n = (position - b) / a
    return n.is_integer() and n > -1
def select_nth_of_type(cache, function, elem):
    ' Implement :nth-of-type() '
    a, b = function.parsed_arguments
    try:
        # Position counted only among siblings of the same tag name
        position = cache.sibling_count(elem, same_type=True) + 1
    except ValueError:
        return False
    if a == 0:
        return position == b
    n = (position - b) / a
    return n.is_integer() and n > -1
def select_nth_last_of_type(cache, function, elem):
    ' Implement :nth-last-of-type() '
    a, b = function.parsed_arguments
    try:
        # Position from the end, counted only among same-tag siblings
        position = cache.sibling_count(elem, before=False, same_type=True) + 1
    except ValueError:
        return False
    if a == 0:
        return position == b
    n = (position - b) / a
    return n.is_integer() and n > -1
# }}}
# Pseudo elements {{{
def pseudo_func(f):
    'Decorator marking f as a valid handler for a (non-functional) pseudo-class'
    f.is_pseudo = True
    return f
@pseudo_func
def allow_all(cache, item):
    # Catch-all predicate substituted for ignored (inappropriate) UI
    # pseudo-classes such as :hover; matches every element.
    return True
def get_func_for_pseudo(cache, ident):
    'Return the handler for the pseudo-class ident, raising ExpressionError if there is none'
    try:
        func = cache.dispatch_map[ident.replace('-', '_')]
    except KeyError:
        if ident not in cache.ignore_inappropriate_pseudo_classes:
            raise ExpressionError(
                "The pseudo-class :%s is not supported" % ident)
        func = allow_all
    # Only functions explicitly marked with @pseudo_func are usable here
    if not hasattr(func, 'is_pseudo'):
        raise ExpressionError(
            "The pseudo-class :%s is invalid" % ident)
    return func
def select_selector(cache, selector):
    # Entry point for one parsed selector: run the selector tree, then
    # filter by its pseudo-element, if any. Functional pseudo-elements are
    # rejected outright; the rest are looked up as pseudo-class handlers.
    if selector.pseudo_element is None:
        for item in cache.iterparsedselector(selector.parsed_tree):
            yield item
        return
    if isinstance(selector.pseudo_element, FunctionalPseudoElement):
        raise ExpressionError(
            "The pseudo-element ::%s is not supported" % selector.pseudo_element.name)
    func = get_func_for_pseudo(cache, selector.pseudo_element)
    for item in cache.iterparsedselector(selector.parsed_tree):
        if func(cache, item):
            yield item
def select_pseudo(cache, pseudo):
    # A non-functional pseudo-class such as :root, :first-child or :empty
    func = get_func_for_pseudo(cache, pseudo.ident)
    if func is select_root:
        # Fast path: only the root element can ever match :root
        yield cache.root
        return
    for item in cache.iterparsedselector(pseudo.selector):
        if func(cache, item):
            yield item
@pseudo_func
def select_root(cache, elem):
    # :root -- matches only the tree root
    return elem is cache.root
@pseudo_func
def select_first_child(cache, elem):
    # :first-child -- no preceding siblings
    try:
        return cache.sibling_count(elem) == 0
    except ValueError:
        return False
@pseudo_func
def select_last_child(cache, elem):
    # :last-child -- no following siblings
    try:
        return cache.sibling_count(elem, before=False) == 0
    except ValueError:
        return False
@pseudo_func
def select_only_child(cache, elem):
    # :only-child -- no siblings at all
    try:
        return cache.all_sibling_count(elem) == 0
    except ValueError:
        return False
@pseudo_func
def select_first_of_type(cache, elem):
    # :first-of-type -- no preceding siblings with the same tag name
    try:
        return cache.sibling_count(elem, same_type=True) == 0
    except ValueError:
        return False
@pseudo_func
def select_last_of_type(cache, elem):
    # :last-of-type -- no following siblings with the same tag name
    try:
        return cache.sibling_count(elem, before=False, same_type=True) == 0
    except ValueError:
        return False
@pseudo_func
def select_only_of_type(cache, elem):
    # :only-of-type -- no siblings with the same tag name
    try:
        return cache.all_sibling_count(elem, same_type=True) == 0
    except ValueError:
        return False
@pseudo_func
def select_empty(cache, elem):
    # :empty -- no child elements and no text content
    return cache.is_empty(elem)
# }}}
# Build the default dispatch table from all module-level select_* functions,
# keyed by the part of the name after 'select_' (e.g. 'element', 'hash',
# 'class', 'nth_child').
default_dispatch_map = {name.partition('_')[2]:obj for name, obj in globals().items() if name.startswith('select_') and callable(obj)}
if __name__ == '__main__':
    # Ad-hoc smoke test: parse a tiny namespaced document and run a selector
    # that uses an ignored UI pseudo-class, tracing matches as they are found.
    from pprint import pprint
    root = etree.fromstring(
        '<body xmlns="xxx" xml:lang="en"><p id="p" class="one two" lang="fr"><a id="a"/><b/><c/><d/></p></body>',
        parser=etree.XMLParser(recover=True, no_network=True, resolve_entities=False))
    select = Select(root, ignore_inappropriate_pseudo_classes=True, trace=True)
    pprint(list(select('p:disabled')))
| 22,345 | Python | .py | 553 | 32.381555 | 144 | 0.632488 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,822 | ordered_set.py | kovidgoyal_calibre/src/css_selectors/ordered_set.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import collections.abc
from polyglot.builtins import string_or_bytes
SLICE_ALL = slice(None)
def is_iterable(obj):
    """
    Are we being asked to look up a list of things, instead of a single thing?
    We check for the `__iter__` attribute so that this can cover types that
    don't have to be known by this module, such as NumPy arrays.
    Strings, however, should be considered as atomic values to look up, not
    iterables.
    """
    if not hasattr(obj, '__iter__'):
        return False
    return not isinstance(obj, string_or_bytes)
class OrderedSet(collections.abc.MutableSet):
    """
    An OrderedSet is a custom MutableSet that remembers its order, so that
    every entry has an index that can be looked up.
    """
    def __init__(self, iterable=None):
        self.items = []  # entries, in insertion order
        self.map = {}  # entry -> index into self.items
        if iterable is not None:
            for item in iterable:
                idx = self.map.get(item)
                if idx is None:
                    self.map[item] = len(self.items)
                    self.items.append(item)
    def __len__(self):
        return len(self.items)
    def __getitem__(self, index):
        """
        Get the item at a given index.
        If `index` is a slice, you will get back that slice of items. If it's
        the slice [:], exactly the same object is returned. (If you want an
        independent copy of an OrderedSet, use `OrderedSet.copy()`.)
        If `index` is an iterable, you'll get the OrderedSet of items
        corresponding to those indices. This is similar to NumPy's
        "fancy indexing".
        """
        if index == SLICE_ALL:
            return self
        elif hasattr(index, '__index__') or isinstance(index, slice):
            result = self.items[index]
            if isinstance(result, list):
                return OrderedSet(result)
            else:
                return result
        elif is_iterable(index):
            return OrderedSet([self.items[i] for i in index])
        else:
            raise TypeError("Don't know how to index an OrderedSet by %r" %
                    index)
    def copy(self):
        'Return an independent shallow copy of this set'
        return OrderedSet(self)
    def __getstate__(self):
        # Pickle as a plain tuple of items; order is the only state we have
        return tuple(self)
    def __setstate__(self, state):
        self.__init__(state)
    def __contains__(self, key):
        return key in self.map
    def add(self, key):
        """
        Add `key` as an item to this OrderedSet, then return its index.
        If `key` is already in the OrderedSet, return the index it already
        had.
        """
        index = self.map.get(key)
        if index is None:
            self.map[key] = index = len(self.items)
            self.items.append(key)
        return index
    def index(self, key):
        """
        Get the index of a given entry, raising a KeyError if it's not
        present.
        `key` can be an iterable of entries that is not a string, in which case
        this returns a list of indices.
        """
        if is_iterable(key):
            return [self.index(subkey) for subkey in key]
        return self.map[key]
    def discard(self, key):
        '''
        Remove `key` if present. Return True if it was removed, False if it
        was not in the set.
        '''
        index = self.map.get(key)
        if index is None:
            return False
        # Fix: also forget the key itself. Previously it was removed only
        # from self.items, so `key in self` kept returning True and
        # self.map[key] held a stale index.
        del self.map[key]
        self.items.pop(index)
        # Every entry after the removed one shifts left by one position
        for item in self.items[index:]:
            self.map[item] -= 1
        return True
    def __iter__(self):
        return iter(self.items)
    def __reversed__(self):
        return reversed(self.items)
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))
    def __eq__(self, other):
        # Against another OrderedSet equality is order-sensitive; against any
        # other container our keys are converted to the other's type first
        # (so comparison with a set is order-insensitive).
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and self.items == other.items
        try:
            return type(other)(self.map) == other
        except TypeError:
            return False
| 4,014 | Python | .py | 107 | 28.560748 | 79 | 0.584794 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,823 | __init__.py | kovidgoyal_calibre/src/css_selectors/__init__.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
from css_selectors.errors import ExpressionError, SelectorError, SelectorSyntaxError
from css_selectors.parser import parse
from css_selectors.select import INAPPROPRIATE_PSEUDO_CLASSES, Select
__all__ = ['parse', 'Select', 'INAPPROPRIATE_PSEUDO_CLASSES', 'SelectorError', 'SelectorSyntaxError', 'ExpressionError']
| 451 | Python | .py | 8 | 54.875 | 120 | 0.783599 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,824 | tests.py | kovidgoyal_calibre/src/css_selectors/tests.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import argparse
import sys
import unittest
from lxml import etree, html
from css_selectors.errors import ExpressionError, SelectorSyntaxError
from css_selectors.parser import parse, tokenize
from css_selectors.select import Select
class TestCSSSelectors(unittest.TestCase):
# Test data {{{
HTML_IDS = '''
<html id="html"><head>
<link id="link-href" href="foo" />
<link id="link-nohref" />
</head><body>
<div id="outer-div">
<a id="name-anchor" name="foo"></a>
<a id="tag-anchor" rel="tag" href="http://localhost/foo">link</a>
<a id="nofollow-anchor" rel="nofollow" href="https://example.org">
link</a>
<ol id="first-ol" class="a b c">
<li id="first-li">content</li>
<li id="second-li" lang="En-us">
<div id="li-div">
</div>
</li>
<li id="third-li" class="ab c"></li>
<li id="fourth-li" class="ab
c"></li>
<li id="fifth-li"></li>
<li id="sixth-li"></li>
<li id="seventh-li"> </li>
</ol>
<p id="paragraph">
<b id="p-b">hi</b> <em id="p-em">there</em>
<b id="p-b2">guy</b>
<input type="checkbox" id="checkbox-unchecked" />
<input type="checkbox" id="checkbox-disabled" disabled="" />
<input type="text" id="text-checked" checked="checked" />
<input type="hidden" />
<input type="hidden" disabled="disabled" />
<input type="checkbox" id="checkbox-checked" checked="checked" />
<input type="checkbox" id="checkbox-disabled-checked"
disabled="disabled" checked="checked" />
<fieldset id="fieldset" disabled="disabled">
<input type="checkbox" id="checkbox-fieldset-disabled" />
<input type="hidden" />
</fieldset>
</p>
<ol id="second-ol">
</ol>
<map name="dummymap">
<area shape="circle" coords="200,250,25" href="foo.html" id="area-href" />
<area shape="default" id="area-nohref" />
</map>
</div>
<div id="foobar-div" foobar="ab bc
cde"><span id="foobar-span"></span></div>
</body></html>
'''
HTML_SHAKESPEARE = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en" debug="true">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
</head>
<body>
<div id="test">
<div class="dialog">
<h2>As You Like It</h2>
<div id="playwright">
by William Shakespeare
</div>
<div class="dialog scene thirdClass" id="scene1">
<h3>ACT I, SCENE III. A room in the palace.</h3>
<div class="dialog">
<div class="direction">Enter CELIA and ROSALIND</div>
</div>
<div id="speech1" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.1">Why, cousin! why, Rosalind! Cupid have mercy! not a word?</div>
</div>
<div id="speech2" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.2">Not one to throw at a dog.</div>
</div>
<div id="speech3" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.3">No, thy words are too precious to be cast away upon</div>
<div id="scene1.3.4">curs; throw some of them at me; come, lame me with reasons.</div>
</div>
<div id="speech4" class="character">ROSALIND</div>
<div id="speech5" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.8">But is all this for your father?</div>
</div>
<div class="dialog">
<div id="scene1.3.5">Then there were two cousins laid up; when the one</div>
<div id="scene1.3.6">should be lamed with reasons and the other mad</div>
<div id="scene1.3.7">without any.</div>
</div>
<div id="speech6" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.9">No, some of it is for my child's father. O, how</div>
<div id="scene1.3.10">full of briers is this working-day world!</div>
</div>
<div id="speech7" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.11">They are but burs, cousin, thrown upon thee in</div>
<div id="scene1.3.12">holiday foolery: if we walk not in the trodden</div>
<div id="scene1.3.13">paths our very petticoats will catch them.</div>
</div>
<div id="speech8" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.14">I could shake them off my coat: these burs are in my heart.</div>
</div>
<div id="speech9" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.15">Hem them away.</div>
</div>
<div id="speech10" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.16">I would try, if I could cry 'hem' and have him.</div>
</div>
<div id="speech11" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.17">Come, come, wrestle with thy affections.</div>
</div>
<div id="speech12" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.18">O, they take the part of a better wrestler than myself!</div>
</div>
<div id="speech13" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.19">O, a good wish upon you! you will try in time, in</div>
<div id="scene1.3.20">despite of a fall. But, turning these jests out of</div>
<div id="scene1.3.21">service, let us talk in good earnest: is it</div>
<div id="scene1.3.22">possible, on such a sudden, you should fall into so</div>
<div id="scene1.3.23">strong a liking with old Sir Rowland's youngest son?</div>
</div>
<div id="speech14" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.24">The duke my father loved his father dearly.</div>
</div>
<div id="speech15" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.25">Doth it therefore ensue that you should love his son</div>
<div id="scene1.3.26">dearly? By this kind of chase, I should hate him,</div>
<div id="scene1.3.27">for my father hated his father dearly; yet I hate</div>
<div id="scene1.3.28">not Orlando.</div>
</div>
<div id="speech16" class="character">ROSALIND</div>
<div title="wtf" class="dialog">
<div id="scene1.3.29">No, faith, hate him not, for my sake.</div>
</div>
<div id="speech17" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.30">Why should I not? doth he not deserve well?</div>
</div>
<div id="speech18" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.31">Let me love him for that, and do you love him</div>
<div id="scene1.3.32">because I do. Look, here comes the duke.</div>
</div>
<div id="speech19" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.33">With his eyes full of anger.</div>
<div class="direction">Enter DUKE FREDERICK, with Lords</div>
</div>
<div id="speech20" class="character">DUKE FREDERICK</div>
<div class="dialog">
<div id="scene1.3.34">Mistress, dispatch you with your safest haste</div>
<div id="scene1.3.35">And get you from our court.</div>
</div>
<div id="speech21" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.36">Me, uncle?</div>
</div>
<div id="speech22" class="character">DUKE FREDERICK</div>
<div class="dialog">
<div id="scene1.3.37">You, cousin</div>
<div id="scene1.3.38">Within these ten days if that thou be'st found</div>
<div id="scene1.3.39">So near our public court as twenty miles,</div>
<div id="scene1.3.40">Thou diest for it.</div>
</div>
<div id="speech23" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.41"> I do beseech your grace,</div>
<div id="scene1.3.42">Let me the knowledge of my fault bear with me:</div>
<div id="scene1.3.43">If with myself I hold intelligence</div>
<div id="scene1.3.44">Or have acquaintance with mine own desires,</div>
<div id="scene1.3.45">If that I do not dream or be not frantic,--</div>
<div id="scene1.3.46">As I do trust I am not--then, dear uncle,</div>
<div id="scene1.3.47">Never so much as in a thought unborn</div>
<div id="scene1.3.48">Did I offend your highness.</div>
</div>
<div id="speech24" class="character">DUKE FREDERICK</div>
<div class="dialog">
<div id="scene1.3.49">Thus do all traitors:</div>
<div id="scene1.3.50">If their purgation did consist in words,</div>
<div id="scene1.3.51">They are as innocent as grace itself:</div>
<div id="scene1.3.52">Let it suffice thee that I trust thee not.</div>
</div>
<div id="speech25" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.53">Yet your mistrust cannot make me a traitor:</div>
<div id="scene1.3.54">Tell me whereon the likelihood depends.</div>
</div>
<div id="speech26" class="character">DUKE FREDERICK</div>
<div class="dialog">
<div id="scene1.3.55">Thou art thy father's daughter; there's enough.</div>
</div>
<div id="speech27" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.56">So was I when your highness took his dukedom;</div>
<div id="scene1.3.57">So was I when your highness banish'd him:</div>
<div id="scene1.3.58">Treason is not inherited, my lord;</div>
<div id="scene1.3.59">Or, if we did derive it from our friends,</div>
<div id="scene1.3.60">What's that to me? my father was no traitor:</div>
<div id="scene1.3.61">Then, good my liege, mistake me not so much</div>
<div id="scene1.3.62">To think my poverty is treacherous.</div>
</div>
<div id="speech28" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.63">Dear sovereign, hear me speak.</div>
</div>
<div id="speech29" class="character">DUKE FREDERICK</div>
<div class="dialog">
<div id="scene1.3.64">Ay, Celia; we stay'd her for your sake,</div>
<div id="scene1.3.65">Else had she with her father ranged along.</div>
</div>
<div id="speech30" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.66">I did not then entreat to have her stay;</div>
<div id="scene1.3.67">It was your pleasure and your own remorse:</div>
<div id="scene1.3.68">I was too young that time to value her;</div>
<div id="scene1.3.69">But now I know her: if she be a traitor,</div>
<div id="scene1.3.70">Why so am I; we still have slept together,</div>
<div id="scene1.3.71">Rose at an instant, learn'd, play'd, eat together,</div>
<div id="scene1.3.72">And wheresoever we went, like Juno's swans,</div>
<div id="scene1.3.73">Still we went coupled and inseparable.</div>
</div>
<div id="speech31" class="character">DUKE FREDERICK</div>
<div class="dialog">
<div id="scene1.3.74">She is too subtle for thee; and her smoothness,</div>
<div id="scene1.3.75">Her very silence and her patience</div>
<div id="scene1.3.76">Speak to the people, and they pity her.</div>
<div id="scene1.3.77">Thou art a fool: she robs thee of thy name;</div>
<div id="scene1.3.78">And thou wilt show more bright and seem more virtuous</div>
<div id="scene1.3.79">When she is gone. Then open not thy lips:</div>
<div id="scene1.3.80">Firm and irrevocable is my doom</div>
<div id="scene1.3.81">Which I have pass'd upon her; she is banish'd.</div>
</div>
<div id="speech32" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.82">Pronounce that sentence then on me, my liege:</div>
<div id="scene1.3.83">I cannot live out of her company.</div>
</div>
<div id="speech33" class="character">DUKE FREDERICK</div>
<div class="dialog">
<div id="scene1.3.84">You are a fool. You, niece, provide yourself:</div>
<div id="scene1.3.85">If you outstay the time, upon mine honour,</div>
<div id="scene1.3.86">And in the greatness of my word, you die.</div>
<div class="direction">Exeunt DUKE FREDERICK and Lords</div>
</div>
<div id="speech34" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.87">O my poor Rosalind, whither wilt thou go?</div>
<div id="scene1.3.88">Wilt thou change fathers? I will give thee mine.</div>
<div id="scene1.3.89">I charge thee, be not thou more grieved than I am.</div>
</div>
<div id="speech35" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.90">I have more cause.</div>
</div>
<div id="speech36" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.91"> Thou hast not, cousin;</div>
<div id="scene1.3.92">Prithee be cheerful: know'st thou not, the duke</div>
<div id="scene1.3.93">Hath banish'd me, his daughter?</div>
</div>
<div id="speech37" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.94">That he hath not.</div>
</div>
<div id="speech38" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.95">No, hath not? Rosalind lacks then the love</div>
<div id="scene1.3.96">Which teacheth thee that thou and I am one:</div>
<div id="scene1.3.97">Shall we be sunder'd? shall we part, sweet girl?</div>
<div id="scene1.3.98">No: let my father seek another heir.</div>
<div id="scene1.3.99">Therefore devise with me how we may fly,</div>
<div id="scene1.3.100">Whither to go and what to bear with us;</div>
<div id="scene1.3.101">And do not seek to take your change upon you,</div>
<div id="scene1.3.102">To bear your griefs yourself and leave me out;</div>
<div id="scene1.3.103">For, by this heaven, now at our sorrows pale,</div>
<div id="scene1.3.104">Say what thou canst, I'll go along with thee.</div>
</div>
<div id="speech39" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.105">Why, whither shall we go?</div>
</div>
<div id="speech40" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.106">To seek my uncle in the forest of Arden.</div>
</div>
<div id="speech41" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.107">Alas, what danger will it be to us,</div>
<div id="scene1.3.108">Maids as we are, to travel forth so far!</div>
<div id="scene1.3.109">Beauty provoketh thieves sooner than gold.</div>
</div>
<div id="speech42" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.110">I'll put myself in poor and mean attire</div>
<div id="scene1.3.111">And with a kind of umber smirch my face;</div>
<div id="scene1.3.112">The like do you: so shall we pass along</div>
<div id="scene1.3.113">And never stir assailants.</div>
</div>
<div id="speech43" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.114">Were it not better,</div>
<div id="scene1.3.115">Because that I am more than common tall,</div>
<div id="scene1.3.116">That I did suit me all points like a man?</div>
<div id="scene1.3.117">A gallant curtle-axe upon my thigh,</div>
<div id="scene1.3.118">A boar-spear in my hand; and--in my heart</div>
<div id="scene1.3.119">Lie there what hidden woman's fear there will--</div>
<div id="scene1.3.120">We'll have a swashing and a martial outside,</div>
<div id="scene1.3.121">As many other mannish cowards have</div>
<div id="scene1.3.122">That do outface it with their semblances.</div>
</div>
<div id="speech44" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.123">What shall I call thee when thou art a man?</div>
</div>
<div id="speech45" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.124">I'll have no worse a name than Jove's own page;</div>
<div id="scene1.3.125">And therefore look you call me Ganymede.</div>
<div id="scene1.3.126">But what will you be call'd?</div>
</div>
<div id="speech46" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.127">Something that hath a reference to my state</div>
<div id="scene1.3.128">No longer Celia, but Aliena.</div>
</div>
<div id="speech47" class="character">ROSALIND</div>
<div class="dialog">
<div id="scene1.3.129">But, cousin, what if we assay'd to steal</div>
<div id="scene1.3.130">The clownish fool out of your father's court?</div>
<div id="scene1.3.131">Would he not be a comfort to our travel?</div>
</div>
<div id="speech48" class="character">CELIA</div>
<div class="dialog">
<div id="scene1.3.132">He'll go along o'er the wide world with me;</div>
<div id="scene1.3.133">Leave me alone to woo him. Let's away,</div>
<div id="scene1.3.134">And get our jewels and our wealth together,</div>
<div id="scene1.3.135">Devise the fittest time and safest way</div>
<div id="scene1.3.136">To hide us from pursuit that will be made</div>
<div id="scene1.3.137">After my flight. Now go we in content</div>
<div id="scene1.3.138">To liberty and not to banishment.</div>
<div class="direction">Exeunt</div>
</div>
</div>
</div>
</div>
</body>
</html>
'''
# }}}
ae = unittest.TestCase.assertEqual
def test_tokenizer(self):  # {{{
    # Tokenize a selector containing escapes, a comment and a function,
    # then compare the exact repr() of every produced token.
    tokens = [
        type('')(item) for item in tokenize(
            r'E\ é > f [a~="y\"x"]:nth(/* fu /]* */-3.7)')]
    self.ae(tokens, [
        "<IDENT 'E é' at 0>",
        "<S ' ' at 4>",
        "<DELIM '>' at 5>",
        "<S ' ' at 6>",
        # the no-break space is not whitespace in CSS
        "<IDENT 'f ' at 7>",  # f\xa0
        "<DELIM '[' at 9>",
        "<IDENT 'a' at 10>",
        "<DELIM '~' at 11>",
        "<DELIM '=' at 12>",
        "<STRING 'y\"x' at 13>",
        "<DELIM ']' at 19>",
        "<DELIM ':' at 20>",
        "<IDENT 'nth' at 21>",
        "<DELIM '(' at 24>",
        # the comment between '(' and the number is skipped entirely,
        # hence the jump in positions
        "<NUMBER '-3.7' at 37>",
        "<DELIM ')' at 41>",
        "<EOF at 42>",
    ])
    # }}}
def test_parser(self):  # {{{
    # Parse selectors and compare the repr() of the resulting parse trees.

    def repr_parse(css):
        selectors = parse(css)
        for selector in selectors:
            assert selector.pseudo_element is None
        # Strip the Python 2 u'' prefix so expectations work on both Pythons.
        return [repr(selector.parsed_tree).replace("(u'", "('")
                for selector in selectors]

    def parse_many(first, *others):
        # Every alternative spelling must produce the same tree as *first*.
        result = repr_parse(first)
        for other in others:
            assert repr_parse(other) == result
        return result

    assert parse_many('*') == ['Element[*]']
    assert parse_many('*|*') == ['Element[*]']
    assert parse_many('*|foo') == ['Element[foo]']
    assert parse_many('foo|*') == ['Element[foo|*]']
    assert parse_many('foo|bar') == ['Element[foo|bar]']
    # This will never match, but it is valid:
    assert parse_many('#foo#bar') == ['Hash[Hash[Element[*]#foo]#bar]']
    assert parse_many(
        'div>.foo',
        'div> .foo',
        'div >.foo',
        'div > .foo',
        'div \n> \t \t .foo', 'div\r>\n\n\n.foo', 'div\f>\f.foo'
    ) == ['CombinedSelector[Element[div] > Class[Element[*].foo]]']
    assert parse_many('td.foo,.bar',
                      'td.foo, .bar',
                      'td.foo\t\r\n\f ,\t\r\n\f .bar'
                      ) == [
        'Class[Element[td].foo]',
        'Class[Element[*].bar]'
    ]
    assert parse_many('div, td.foo, div.bar span') == [
        'Element[div]',
        'Class[Element[td].foo]',
        'CombinedSelector[Class[Element[div].bar] '
        '<followed> Element[span]]']
    assert parse_many('div > p') == [
        'CombinedSelector[Element[div] > Element[p]]']
    assert parse_many('td:first') == [
        'Pseudo[Element[td]:first]']
    assert parse_many('td:first') == [
        'Pseudo[Element[td]:first]']
    assert parse_many('td :first') == [
        'CombinedSelector[Element[td] '
        '<followed> Pseudo[Element[*]:first]]']
    assert parse_many('td :first') == [
        'CombinedSelector[Element[td] '
        '<followed> Pseudo[Element[*]:first]]']
    assert parse_many('a[name]', 'a[ name\t]') == [
        'Attrib[Element[a][name]]']
    assert parse_many('a [name]') == [
        'CombinedSelector[Element[a] <followed> Attrib[Element[*][name]]]']
    self.ae(parse_many('a[rel="include"]', 'a[rel = include]'), [
        "Attrib[Element[a][rel = 'include']]"])
    assert parse_many("a[hreflang |= 'en']", "a[hreflang|=en]") == [
        "Attrib[Element[a][hreflang |= 'en']]"]
    self.ae(parse_many('div:nth-child(10)'), [
        "Function[Element[div]:nth-child(['10'])]"])
    assert parse_many(':nth-child(2n+2)') == [
        "Function[Element[*]:nth-child(['2', 'n', '+2'])]"]
    assert parse_many('div:nth-of-type(10)') == [
        "Function[Element[div]:nth-of-type(['10'])]"]
    assert parse_many('div div:nth-of-type(10) .aclass') == [
        'CombinedSelector[CombinedSelector[Element[div] <followed> '
        "Function[Element[div]:nth-of-type(['10'])]] "
        '<followed> Class[Element[*].aclass]]']
    assert parse_many('label:only') == [
        'Pseudo[Element[label]:only]']
    assert parse_many('a:lang(fr)') == [
        "Function[Element[a]:lang(['fr'])]"]
    assert parse_many('div:contains("foo")') == [
        "Function[Element[div]:contains(['foo'])]"]
    assert parse_many('div#foobar') == [
        'Hash[Element[div]#foobar]']
    assert parse_many('div:not(div.foo)') == [
        'Negation[Element[div]:not(Class[Element[div].foo])]']
    assert parse_many('td ~ th') == [
        'CombinedSelector[Element[td] ~ Element[th]]']
    # }}}
def test_pseudo_elements(self):  # {{{
    # Check how pseudo-elements are split off the parsed selector tree.

    def parse_pseudo(css):
        # Return [(repr_of_parsed_tree, pseudo_element_or_None), ...]
        result = []
        for selector in parse(css):
            pseudo = selector.pseudo_element
            pseudo = type('')(pseudo) if pseudo else pseudo
            # No Symbol here
            assert pseudo is None or isinstance(pseudo, type(''))
            selector = repr(selector.parsed_tree).replace("(u'", "('")
            result.append((selector, pseudo))
        return result

    def parse_one(css):
        result = parse_pseudo(css)
        assert len(result) == 1
        return result[0]

    self.ae(parse_one('foo'), ('Element[foo]', None))
    self.ae(parse_one('*'), ('Element[*]', None))
    self.ae(parse_one(':empty'), ('Pseudo[Element[*]:empty]', None))

    # Special cases for CSS 2.1 pseudo-elements
    self.ae(parse_one(':BEfore'), ('Element[*]', 'before'))
    self.ae(parse_one(':aftER'), ('Element[*]', 'after'))
    self.ae(parse_one(':First-Line'), ('Element[*]', 'first-line'))
    self.ae(parse_one(':First-Letter'), ('Element[*]', 'first-letter'))

    self.ae(parse_one('::befoRE'), ('Element[*]', 'before'))
    self.ae(parse_one('::AFter'), ('Element[*]', 'after'))
    self.ae(parse_one('::firsT-linE'), ('Element[*]', 'first-line'))
    self.ae(parse_one('::firsT-letteR'), ('Element[*]', 'first-letter'))

    self.ae(parse_one('::text-content'), ('Element[*]', 'text-content'))
    self.ae(parse_one('::attr(name)'), (
        "Element[*]", "FunctionalPseudoElement[::attr(['name'])]"))

    self.ae(parse_one('::Selection'), ('Element[*]', 'selection'))
    self.ae(parse_one('foo:after'), ('Element[foo]', 'after'))
    self.ae(parse_one('foo::selection'), ('Element[foo]', 'selection'))
    self.ae(parse_one('lorem#ipsum ~ a#b.c[href]:empty::selection'), (
        'CombinedSelector[Hash[Element[lorem]#ipsum] ~ '
        'Pseudo[Attrib[Class[Hash[Element[a]#b].c][href]]:empty]]',
        'selection'))

    # BUG FIX: this used to be a bare `==` comparison whose result was
    # discarded, so the multi-selector case was never actually verified.
    self.ae(parse_pseudo('foo:before, bar, baz:after'), [
        ('Element[foo]', 'before'),
        ('Element[bar]', None),
        ('Element[baz]', 'after')])
    # }}}
def test_specificity(self):  # {{{
    # Verify the (id, class, element) specificity triples per CSS3.

    def specificity(css):
        selectors = parse(css)
        assert len(selectors) == 1
        return selectors[0].specificity()

    assert specificity('*') == (0, 0, 0)
    assert specificity(' foo') == (0, 0, 1)
    assert specificity(':empty ') == (0, 1, 0)
    assert specificity(':before') == (0, 0, 1)
    assert specificity('*:before') == (0, 0, 1)
    assert specificity(':nth-child(2)') == (0, 1, 0)
    assert specificity('.bar') == (0, 1, 0)
    assert specificity('[baz]') == (0, 1, 0)
    assert specificity('[baz="4"]') == (0, 1, 0)
    assert specificity('[baz^="4"]') == (0, 1, 0)
    assert specificity('#lipsum') == (1, 0, 0)

    # :not() contributes the specificity of its argument, never its own.
    assert specificity(':not(*)') == (0, 0, 0)
    assert specificity(':not(foo)') == (0, 0, 1)
    assert specificity(':not(.foo)') == (0, 1, 0)
    assert specificity(':not([foo])') == (0, 1, 0)
    assert specificity(':not(:empty)') == (0, 1, 0)
    assert specificity(':not(#foo)') == (1, 0, 0)

    assert specificity('foo:empty') == (0, 1, 1)
    assert specificity('foo:before') == (0, 0, 2)
    assert specificity('foo::before') == (0, 0, 2)
    assert specificity('foo:empty::before') == (0, 1, 2)

    assert specificity('#lorem + foo#ipsum:first-child > bar:first-line'
                       ) == (2, 1, 3)
    # }}}
def test_parse_errors(self):  # {{{
    # Invalid selectors must raise SelectorSyntaxError with precise messages.

    def get_error(css):
        # Return the error message, or None if the selector parsed cleanly.
        try:
            parse(css)
        except SelectorSyntaxError:
            # Py2, Py3, ...
            return str(sys.exc_info()[1]).replace("(u'", "('")

    self.ae(get_error('attributes(href)/html/body/a'), (
        "Expected selector, got <DELIM '(' at 10>"))
    assert get_error('attributes(href)') == (
        "Expected selector, got <DELIM '(' at 10>")
    assert get_error('html/body/a') == (
        "Expected selector, got <DELIM '/' at 4>")
    assert get_error(' ') == (
        "Expected selector, got <EOF at 1>")
    assert get_error('div, ') == (
        "Expected selector, got <EOF at 5>")
    assert get_error(' , div') == (
        "Expected selector, got <DELIM ',' at 1>")
    assert get_error('p, , div') == (
        "Expected selector, got <DELIM ',' at 3>")
    assert get_error('div > ') == (
        "Expected selector, got <EOF at 6>")
    assert get_error(' > div') == (
        "Expected selector, got <DELIM '>' at 2>")
    assert get_error('foo|#bar') == (
        "Expected ident or '*', got <HASH 'bar' at 4>")
    assert get_error('#.foo') == (
        "Expected selector, got <DELIM '#' at 0>")
    assert get_error('.#foo') == (
        "Expected ident, got <HASH 'foo' at 1>")
    assert get_error(':#foo') == (
        "Expected ident, got <HASH 'foo' at 1>")
    assert get_error('[*]') == (
        "Expected '|', got <DELIM ']' at 2>")
    assert get_error('[foo|]') == (
        "Expected ident, got <DELIM ']' at 5>")
    assert get_error('[#]') == (
        "Expected ident or '*', got <DELIM '#' at 1>")
    assert get_error('[foo=#]') == (
        "Expected string or ident, got <DELIM '#' at 5>")
    assert get_error('[href]a') == (
        "Expected selector, got <IDENT 'a' at 6>")
    assert get_error('[rel=stylesheet]') is None
    assert get_error('[rel:stylesheet]') == (
        "Operator expected, got <DELIM ':' at 4>")
    assert get_error('[rel=stylesheet') == (
        "Expected ']', got <EOF at 15>")
    assert get_error(':lang(fr)') is None
    assert get_error(':lang(fr') == (
        "Expected an argument, got <EOF at 8>")
    assert get_error(':contains("foo') == (
        "Unclosed string at 10")
    assert get_error('foo!') == (
        "Expected selector, got <DELIM '!' at 3>")

    # Mis-placed pseudo-elements
    assert get_error('a:before:empty') == (
        "Got pseudo-element ::before not at the end of a selector")
    assert get_error('li:before a') == (
        "Got pseudo-element ::before not at the end of a selector")
    assert get_error(':not(:before)') == (
        "Got pseudo-element ::before inside :not() at 12")
    assert get_error(':not(:not(a))') == (
        "Got nested :not()")
    # }}}
def test_select(self):  # {{{
    # End-to-end selection against the HTML_IDS fixture: each selector is
    # run through Select() and the matched element ids are compared.
    document = etree.fromstring(self.HTML_IDS, parser=etree.XMLParser(recover=True, no_network=True, resolve_entities=False))
    select = Select(document)

    def select_ids(selector):
        for elem in select(selector):
            yield elem.get('id')

    def pcss(main, *selectors, **kwargs):
        # All alternative spellings must match the same ids as *main*.
        # NOTE(review): **kwargs (e.g. skip_webkit=True below) is accepted
        # but never used — presumably a leftover from an upstream test
        # harness that also ran selectors through WebKit; confirm.
        result = list(select_ids(main))
        for selector in selectors:
            self.ae(list(select_ids(selector)), result)
        return result

    all_ids = pcss('*')
    self.ae(all_ids[:6], [
        'html', None, 'link-href', 'link-nohref', None, 'outer-div'])
    self.ae(all_ids[-1:], ['foobar-span'])
    self.ae(pcss('div'), ['outer-div', 'li-div', 'foobar-div'])
    self.ae(pcss('DIV'), [
        'outer-div', 'li-div', 'foobar-div'])  # case-insensitive in HTML
    self.ae(pcss('div div'), ['li-div'])
    self.ae(pcss('div, div div'), ['outer-div', 'li-div', 'foobar-div'])
    self.ae(pcss('a[name]'), ['name-anchor'])
    self.ae(pcss('a[NAme]'), ['name-anchor'])  # case-insensitive in HTML:
    self.ae(pcss('a[rel]'), ['tag-anchor', 'nofollow-anchor'])
    self.ae(pcss('a[rel="tag"]'), ['tag-anchor'])
    self.ae(pcss('a[href*="localhost"]'), ['tag-anchor'])
    self.ae(pcss('a[href*=""]'), [])
    self.ae(pcss('a[href^="http"]'), ['tag-anchor', 'nofollow-anchor'])
    self.ae(pcss('a[href^="http:"]'), ['tag-anchor'])
    self.ae(pcss('a[href^=""]'), [])
    self.ae(pcss('a[href$="org"]'), ['nofollow-anchor'])
    self.ae(pcss('a[href$=""]'), [])
    self.ae(pcss('div[foobar~="bc"]', 'div[foobar~="cde"]', skip_webkit=True), ['foobar-div'])
    self.ae(pcss('[foobar~="ab bc"]', '[foobar~=""]', '[foobar~=" \t"]'), [])
    self.ae(pcss('div[foobar~="cd"]'), [])
    self.ae(pcss('*[lang|="En"]', '[lang|="En-us"]'), ['second-li'])
    # Attribute values are case sensitive
    self.ae(pcss('*[lang|="en"]', '[lang|="en-US"]', skip_webkit=True), [])
    self.ae(pcss('*[lang|="e"]'), [])
    # :lang() is case-insensitive, unlike attribute matching above
    self.ae(pcss(':lang("EN")', '*:lang(en-US)', skip_webkit=True), ['second-li', 'li-div'])
    self.ae(pcss(':lang("e")'), [])
    # Structural pseudo-classes
    self.ae(pcss('li:nth-child(1)', 'li:first-child'), ['first-li'])
    self.ae(pcss('li:nth-child(3)', '#first-li ~ :nth-child(3)'), ['third-li'])
    self.ae(pcss('li:nth-child(10)'), [])
    self.ae(pcss('li:nth-child(2n)', 'li:nth-child(even)', 'li:nth-child(2n+0)'), ['second-li', 'fourth-li', 'sixth-li'])
    self.ae(pcss('li:nth-child(+2n+1)', 'li:nth-child(odd)'), ['first-li', 'third-li', 'fifth-li', 'seventh-li'])
    self.ae(pcss('li:nth-child(2n+4)'), ['fourth-li', 'sixth-li'])
    self.ae(pcss('li:nth-child(3n+1)'), ['first-li', 'fourth-li', 'seventh-li'])
    self.ae(pcss('li:nth-last-child(0)'), [])
    self.ae(pcss('li:nth-last-child(1)', 'li:last-child'), ['seventh-li'])
    self.ae(pcss('li:nth-last-child(2n)', 'li:nth-last-child(even)'), ['second-li', 'fourth-li', 'sixth-li'])
    self.ae(pcss('li:nth-last-child(2n+2)'), ['second-li', 'fourth-li', 'sixth-li'])
    self.ae(pcss('ol:first-of-type'), ['first-ol'])
    self.ae(pcss('ol:nth-child(1)'), [])
    self.ae(pcss('ol:nth-of-type(2)'), ['second-ol'])
    self.ae(pcss('ol:nth-last-of-type(1)'), ['second-ol'])
    self.ae(pcss('span:only-child'), ['foobar-span'])
    self.ae(pcss('li div:only-child'), ['li-div'])
    self.ae(pcss('div *:only-child'), ['li-div', 'foobar-span'])
    self.ae(pcss('p *:only-of-type', skip_webkit=True), ['p-em', 'fieldset'])
    self.ae(pcss('p:only-of-type', skip_webkit=True), ['paragraph'])
    self.ae(pcss('a:empty', 'a:EMpty'), ['name-anchor'])
    self.ae(pcss('li:empty'), ['third-li', 'fourth-li', 'fifth-li', 'sixth-li'])
    self.ae(pcss(':root', 'html:root', 'li:root'), ['html'])
    self.ae(pcss('* :root', 'p *:root'), [])
    # Classes, ids, combinators
    self.ae(pcss('.a', '.b', '*.a', 'ol.a'), ['first-ol'])
    self.ae(pcss('.c', '*.c'), ['first-ol', 'third-li', 'fourth-li'])
    self.ae(pcss('ol *.c', 'ol li.c', 'li ~ li.c', 'ol > li.c'), [
        'third-li', 'fourth-li'])
    self.ae(pcss('#first-li', 'li#first-li', '*#first-li'), ['first-li'])
    self.ae(pcss('li div', 'li > div', 'div div'), ['li-div'])
    self.ae(pcss('div > div'), [])
    self.ae(pcss('div>.c', 'div > .c'), ['first-ol'])
    self.ae(pcss('div + div'), ['foobar-div'])
    self.ae(pcss('a ~ a'), ['tag-anchor', 'nofollow-anchor'])
    self.ae(pcss('a[rel="tag"] ~ a'), ['nofollow-anchor'])
    self.ae(pcss('ol#first-ol li:last-child'), ['seventh-li'])
    self.ae(pcss('ol#first-ol *:last-child'), ['li-div', 'seventh-li'])
    self.ae(pcss('#outer-div:first-child'), ['outer-div'])
    self.ae(pcss('#outer-div :first-child'), [
        'name-anchor', 'first-li', 'li-div', 'p-b',
        'checkbox-fieldset-disabled', 'area-href'])
    self.ae(pcss('a[href]'), ['tag-anchor', 'nofollow-anchor'])
    self.ae(pcss(':not(*)'), [])
    self.ae(pcss('a:not([href])'), ['name-anchor'])
    self.ae(pcss('ol :Not(li[class])', skip_webkit=True), [
        'first-li', 'second-li', 'li-div',
        'fifth-li', 'sixth-li', 'seventh-li'])
    # Escaped characters never match anything in the fixture
    self.ae(pcss(r'di\a0 v', r'div\['), [])
    self.ae(pcss(r'[h\a0 ref]', r'[h\]ref]'), [])
    self.assertRaises(ExpressionError, lambda : tuple(select('body:nth-child')))
    # With ignore_inappropriate_pseudo_classes, dynamic pseudo-classes
    # such as :hover match instead of raising.
    select = Select(document, ignore_inappropriate_pseudo_classes=True)
    self.assertGreater(len(tuple(select('p:hover'))), 0)
def test_select_shakespeare(self):
    # Selector match-count checks against the large HTML_SHAKESPEARE fixture.
    document = html.document_fromstring(self.HTML_SHAKESPEARE)
    select = Select(document)

    def count(s):
        # Number of elements matched by selector *s*.
        return sum(1 for r in select(s))

    # Data borrowed from http://mootools.net/slickspeed/
    # NOTE(review): counts changed from the original slickspeed data;
    # presumably because only part of the benchmark document is embedded
    # here — confirm against the fixture.
    self.ae(count('*'), 249)
    assert count('div:only-child') == 22  # ?
    assert count('div:nth-child(even)') == 106
    assert count('div:nth-child(2n)') == 106
    assert count('div:nth-child(odd)') == 137
    assert count('div:nth-child(2n+1)') == 137
    assert count('div:nth-child(n)') == 243
    assert count('div:last-child') == 53
    assert count('div:first-child') == 51
    assert count('div > div') == 242
    assert count('div + div') == 190
    assert count('div ~ div') == 190
    assert count('body') == 1
    assert count('body div') == 243
    assert count('div') == 243
    assert count('div div') == 242
    assert count('div div div') == 241
    assert count('div, div, div') == 243
    assert count('div, a, span') == 243
    assert count('.dialog') == 51
    assert count('div.dialog') == 51
    assert count('div .dialog') == 51
    assert count('div.character, div.dialog') == 99
    assert count('div.direction.dialog') == 0
    assert count('div.dialog.direction') == 0
    assert count('div.dialog.scene') == 1
    assert count('div.scene.scene') == 1
    assert count('div.scene .scene') == 0
    assert count('div.direction .dialog ') == 0
    assert count('div .dialog .direction') == 4
    assert count('div.dialog .dialog .direction') == 4
    assert count('#speech5') == 1
    assert count('div#speech5') == 1
    assert count('div #speech5') == 1
    assert count('div.scene div.dialog') == 49
    assert count('div#scene1 div.dialog div') == 142
    assert count('#scene1 #speech1') == 1
    assert count('div[class]') == 103
    assert count('div[class=dialog]') == 50
    assert count('div[class^=dia]') == 51
    assert count('div[class$=log]') == 50
    assert count('div[class*=sce]') == 1
    assert count('div[class|=dialog]') == 50  # ? Seems right
    assert count('div[class~=dialog]') == 51  # ? Seems right
    # }}}
# Run tests {{{
def find_tests():
    """Collect every test defined on TestCSSSelectors into a suite."""
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromTestCase(TestCSSSelectors)
def run_tests(find_tests=find_tests, for_build=False):
    # Run the test suite. With for_build=True, run quietly and fail fast
    # (used by the build system); otherwise accept an optional test name
    # on the command line. A name starting with '.' selects a single test
    # method by (possibly abbreviated) method name.
    if not for_build:
        parser = argparse.ArgumentParser()
        parser.add_argument('name', nargs='?', default=None,
                            help='The name of the test to run')
        args = parser.parse_args()
    if not for_build and args.name and args.name.startswith('.'):
        tests = find_tests()
        q = args.name[1:]
        if not q.startswith('test_'):
            q = 'test_' + q
        ans = None
        try:
            # Stop at the first test whose method name matches.
            for test in tests:
                if test._testMethodName == q:
                    ans = test
                    raise StopIteration()
        except StopIteration:
            pass
        if ans is None:
            print('No test named %s found' % args.name)
            raise SystemExit(1)
        tests = ans
    else:
        # Full dotted name lookup, or the whole suite when no name is given.
        tests = unittest.defaultTestLoader.loadTestsFromName(args.name) if not for_build and args.name else find_tests()
    r = unittest.TextTestRunner
    if for_build:
        r = r(verbosity=0, buffer=True, failfast=True)
    else:
        r = r(verbosity=4)
    result = r.run(tests)
    # NOTE(review): due to and/or precedence this parses as
    # `(for_build and result.errors) or result.failures`, i.e. failures exit
    # non-zero even outside build mode — confirm whether
    # `for_build and (result.errors or result.failures)` was intended.
    if for_build and result.errors or result.failures:
        raise SystemExit(1)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    run_tests()
# }}}
| 37,012 | Python | .py | 800 | 40.1175 | 129 | 0.599027 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,825 | parser.py | kovidgoyal_calibre/src/css_selectors/parser.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
"""
Tokenizer, parser and parsed objects for CSS selectors.
:copyright: (c) 2007-2012 Ian Bicking and contributors.
See AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import operator
import re
import string
import sys
from css_selectors.errors import ExpressionError, SelectorSyntaxError
from polyglot.builtins import codepoint_to_chr, unicode_type
# Translation table mapping ASCII upper-case code points to lower-case,
# for use with unicode str.translate() on both Python 2 and 3.
utab = {c:c+32 for c in range(ord(u'A'), ord(u'Z')+1)}
if sys.version_info.major < 3:
    # Byte-string translation table for Python 2 `str` objects.
    tab = string.maketrans(string.ascii_uppercase, string.ascii_lowercase)

    def ascii_lower(string):
        """Lower-case, but only in the ASCII range."""
        return string.translate(utab if isinstance(string, unicode_type) else tab)

    def urepr(x):
        # repr() that strips the Python 2 u'' prefix, so output (and the
        # test expectations built on it) matches Python 3.
        if isinstance(x, list):
            return '[%s]' % ', '.join((map(urepr, x)))
        ans = repr(x)
        if ans.startswith("u'") or ans.startswith('u"'):
            ans = ans[1:]
        return ans
else:
    def ascii_lower(x):
        """Lower-case, but only in the ASCII range."""
        return x.translate(utab)

    # On Python 3, repr() already produces the desired form.
    urepr = repr
# Parsed objects
class Selector:
    """
    Represents a parsed selector: the parse tree plus an optional
    pseudo-element that was split off during parsing.
    """

    def __init__(self, tree, pseudo_element=None):
        self.parsed_tree = tree
        # Identifier pseudo-elements are normalised to ASCII lower-case;
        # functional ones normalise their own name.
        if pseudo_element is not None and not isinstance(
                pseudo_element, FunctionalPseudoElement):
            pseudo_element = ascii_lower(pseudo_element)
        #: A :class:`FunctionalPseudoElement`,
        #: or the identifier for the pseudo-element as a string,
        # or ``None``.
        #:
        #: +-------------------------+----------------+--------------------------------+
        #: |                         | Selector       | Pseudo-element                 |
        #: +=========================+================+================================+
        #: | CSS3 syntax             | ``a::before``  | ``'before'``                   |
        #: +-------------------------+----------------+--------------------------------+
        #: | Older syntax            | ``a:before``   | ``'before'``                   |
        #: +-------------------------+----------------+--------------------------------+
        #: | From the Lists3_ draft, | ``li::marker`` | ``'marker'``                   |
        #: | not in Selectors3       |                |                                |
        #: +-------------------------+----------------+--------------------------------+
        #: | Invalid pseudo-class    | ``li:marker``  | ``None``                       |
        #: +-------------------------+----------------+--------------------------------+
        #: | Functional              | ``a::foo(2)``  | ``FunctionalPseudoElement(…)`` |
        #: +-------------------------+----------------+--------------------------------+
        #:
        # : .. _Lists3: http://www.w3.org/TR/2011/WD-css3-lists-20110524/#marker-pseudoelement
        self.pseudo_element = pseudo_element

    def __repr__(self):
        # NOTE(review): for a FunctionalPseudoElement the first assignment is
        # immediately overwritten by the second branch (the object is truthy),
        # so repr() is only reached via '%s' interpolation — looks like an
        # intended `elif`; confirm before changing, tests pin the output.
        if isinstance(self.pseudo_element, FunctionalPseudoElement):
            pseudo_element = repr(self.pseudo_element)
        if self.pseudo_element:
            pseudo_element = '::%s' % self.pseudo_element
        else:
            pseudo_element = ''
        return '%s[%r%s]' % (
            self.__class__.__name__, self.parsed_tree, pseudo_element)

    def specificity(self):
        """Return the specificity_ of this selector as a tuple of 3 integers.

        .. _specificity: http://www.w3.org/TR/selectors/#specificity
        """
        a, b, c = self.parsed_tree.specificity()
        # A pseudo-element counts like an element name.
        if self.pseudo_element:
            c += 1
        return a, b, c
class Class:
    """Represents ``selector.class_name``."""

    def __init__(self, selector, class_name):
        self.selector = selector
        self.class_name = class_name

    def __repr__(self):
        return '{}[{!r}.{}]'.format(
            self.__class__.__name__, self.selector, self.class_name)

    def specificity(self):
        # A class adds one to the middle component.
        a, b, c = self.selector.specificity()
        return a, b + 1, c
class FunctionalPseudoElement:
    """
    Represents selector::name(arguments)

    .. attribute:: name

        The name (identifier) of the pseudo-element, as a string.

    .. attribute:: arguments

        The arguments of the pseudo-element, as a list of tokens.

        **Note:** tokens are not part of the public API,
        and may change between versions.
        Use at your own risks.
    """

    def __init__(self, name, arguments):
        self.name = ascii_lower(name)
        self.arguments = arguments

    def __repr__(self):
        return '%s[::%s(%s)]' % (
            self.__class__.__name__, self.name,
            urepr([token.value for token in self.arguments]))

    def argument_types(self):
        # Token type strings ('IDENT', 'STRING', ...) of the arguments.
        return [token.type for token in self.arguments]

    def specificity(self):
        # NOTE(review): this class never sets self.selector (see __init__),
        # so calling this raises AttributeError — presumably a copy/paste
        # from the other node classes; it appears to be never called, confirm
        # before fixing or removing.
        a, b, c = self.selector.specificity()
        b += 1
        return a, b, c
class Function:
    """
    Represents selector:name(expr), e.g. ``:nth-child(2n+1)``.
    """

    def __init__(self, selector, name, arguments):
        self.selector = selector
        self.name = ascii_lower(name)
        self.arguments = arguments
        # Lazy cache for the (a, b) pair produced by parse_series().
        self._parsed_arguments = None

    def __repr__(self):
        return '%s[%r:%s(%s)]' % (
            self.__class__.__name__, self.selector, self.name,
            urepr([token.value for token in self.arguments]))

    def argument_types(self):
        # Token type strings ('IDENT', 'NUMBER', ...) of the arguments.
        return [token.type for token in self.arguments]

    @property
    def parsed_arguments(self):
        # Parse the an+b argument series once and cache the result.
        # Raises ExpressionError if the series is malformed.
        if self._parsed_arguments is None:
            try:
                self._parsed_arguments = parse_series(self.arguments)
            except ValueError:
                raise ExpressionError("Invalid series: '%r'" % self.arguments)
        return self._parsed_arguments

    def parse_arguments(self):
        # Eagerly populate the parsed-arguments cache.
        # BUG FIX: this used to test `self.arguments_parsed`, an attribute
        # that was never initialized anywhere, so every call raised
        # AttributeError. Route through the caching property instead.
        self.parsed_arguments

    def specificity(self):
        # A functional pseudo-class counts like a class.
        a, b, c = self.selector.specificity()
        b += 1
        return a, b, c
class Pseudo:
    """Represents ``selector:ident`` (a non-functional pseudo-class)."""

    def __init__(self, selector, ident):
        self.selector = selector
        self.ident = ascii_lower(ident)

    def __repr__(self):
        return '{}[{!r}:{}]'.format(
            self.__class__.__name__, self.selector, self.ident)

    def specificity(self):
        # A pseudo-class adds one to the middle component.
        a, b, c = self.selector.specificity()
        return a, b + 1, c
class Negation:
    """Represents ``selector:not(subselector)``."""

    def __init__(self, selector, subselector):
        self.selector = selector
        self.subselector = subselector

    def __repr__(self):
        return '{}[{!r}:not({!r})]'.format(
            self.__class__.__name__, self.selector, self.subselector)

    def specificity(self):
        # :not() contributes the specificity of its argument in full.
        outer = self.selector.specificity()
        inner = self.subselector.specificity()
        return tuple(x + y for x, y in zip(outer, inner))
class Attrib:
    """Represents ``selector[namespace|attrib operator value]``."""

    def __init__(self, selector, namespace, attrib, operator, value):
        self.selector = selector
        self.namespace = namespace
        self.attrib = attrib
        self.operator = operator
        self.value = value

    def __repr__(self):
        # The attribute name is shown with its namespace prefix, if any.
        name = self.attrib
        if self.namespace:
            name = '%s|%s' % (self.namespace, name)
        cls = self.__class__.__name__
        if self.operator == 'exists':
            return '%s[%r[%s]]' % (cls, self.selector, name)
        return '%s[%r[%s %s %s]]' % (
            cls, self.selector, name, self.operator, urepr(self.value))

    def specificity(self):
        # An attribute test adds one to the middle component.
        a, b, c = self.selector.specificity()
        return a, b + 1, c
class Element:
    """
    Represents ``namespace|element``.

    `None` is for the universal selector '*'.
    """

    def __init__(self, namespace=None, element=None):
        self.namespace = namespace
        self.element = element

    def __repr__(self):
        name = self.element if self.element else '*'
        if self.namespace:
            name = '%s|%s' % (self.namespace, name)
        return '%s[%s]' % (self.__class__.__name__, name)

    def specificity(self):
        # Only a named element contributes; '*' has zero specificity.
        return (0, 0, 1) if self.element else (0, 0, 0)
class Hash:
    """Represents ``selector#id``."""

    def __init__(self, selector, id):
        self.selector = selector
        self.id = id

    def __repr__(self):
        return '{}[{!r}#{}]'.format(
            self.__class__.__name__, self.selector, self.id)

    def specificity(self):
        # An id adds one to the highest-priority component.
        a, b, c = self.selector.specificity()
        return a + 1, b, c
class CombinedSelector:
    """Two selectors joined by a combinator: ' ', '+', '>' or '~'."""

    def __init__(self, selector, combinator, subselector):
        assert selector is not None
        self.selector = selector
        self.combinator = combinator
        self.subselector = subselector

    def __repr__(self):
        # The descendant combinator (a space) is spelled out for readability.
        comb = '<followed>' if self.combinator == ' ' else self.combinator
        return '%s[%r %s %r]' % (
            self.__class__.__name__, self.selector, comb, self.subselector)

    def specificity(self):
        # Specificity of a combined selector is the sum of both halves.
        a1, b1, c1 = self.selector.specificity()
        a2, b2, c2 = self.subselector.specificity()
        return a1 + a2, b1 + b2, c1 + c2
# Parser

# Fast-path regexps used by parse() to recognise the most common simple
# selectors without running the full tokenizer.

# foo
_el_re = re.compile(r'^[ \t\r\n\f]*([a-zA-Z]+)[ \t\r\n\f]*$')

# foo#bar or #bar
_id_re = re.compile(r'^[ \t\r\n\f]*([a-zA-Z]*)#([a-zA-Z0-9_-]+)[ \t\r\n\f]*$')

# foo.bar or .bar
_class_re = re.compile(
    r'^[ \t\r\n\f]*([a-zA-Z]*)\.([a-zA-Z][a-zA-Z0-9_-]*)[ \t\r\n\f]*$')
def parse(css):
    """Parse a CSS *group of selectors*.

    :param css:
        A *group of selectors* as a Unicode string.
    :raises:
        :class:`SelectorSyntaxError` on invalid selectors.
    :returns:
        A list of parsed :class:`Selector` objects, one for each
        selector in the comma-separated group.
    """
    # Fast path for simple cases: a bare element, id or class selector
    # can be recognised with a single regexp match.
    match = _el_re.match(css)
    if match:
        return [Selector(Element(element=match.group(1)))]
    match = _id_re.match(css)
    if match is not None:
        return [Selector(Hash(Element(element=match.group(1) or None),
                              match.group(2)))]
    match = _class_re.match(css)
    if match is not None:
        return [Selector(Class(Element(element=match.group(1) or None),
                               match.group(2)))]
    # General case: run the real tokenizer and recursive-descent parser.
    # (Removed a long-dead commented-out block that used to rewrite
    # SelectorSyntaxError messages here.)
    stream = TokenStream(tokenize(css))
    stream.source = css
    return list(parse_selector_group(stream))
def parse_selector_group(stream):
    """Yield a Selector for each comma-separated selector in the stream."""
    stream.skip_whitespace()
    while True:
        yield Selector(*parse_selector(stream))
        if stream.peek() != ('DELIM', ','):
            break
        # Consume the comma and any whitespace before the next selector.
        stream.next()
        stream.skip_whitespace()
def parse_selector(stream):
    # Parse one full selector (a chain of simple selectors joined by
    # combinators). Returns a (tree, pseudo_element) pair.
    result, pseudo_element = parse_simple_selector(stream)
    while 1:
        stream.skip_whitespace()
        peek = stream.peek()
        if peek in (('EOF', None), ('DELIM', ',')):
            break
        # A pseudo-element is only legal on the *last* simple selector.
        if pseudo_element:
            raise SelectorSyntaxError(
                'Got pseudo-element ::%s not at the end of a selector'
                % pseudo_element)
        if peek.is_delim('+', '>', '~'):
            # A combinator
            combinator = stream.next().value
            stream.skip_whitespace()
        else:
            # By exclusion, the last parse_simple_selector() ended
            # at peek == ' '
            combinator = ' '
        next_selector, pseudo_element = parse_simple_selector(stream)
        result = CombinedSelector(result, combinator, next_selector)
    return result, pseudo_element
# CSS 2.1 pseudo-elements that may legally be written with a single ':'
# (newer pseudo-elements require the '::' form).
special_pseudo_elements = (
    'first-line', 'first-letter', 'before', 'after')
def parse_simple_selector(stream, inside_negation=False):
    # Parse a single compound selector: optional namespace|element followed
    # by any number of #id, .class, [attrib], :pseudo suffixes.
    # Returns a (tree, pseudo_element) pair.
    stream.skip_whitespace()
    selector_start = len(stream.used)
    peek = stream.peek()
    if peek.type == 'IDENT' or peek == ('DELIM', '*'):
        # First ident might be either the element name or, if followed by
        # '|', a namespace prefix.
        if peek.type == 'IDENT':
            namespace = stream.next().value
        else:
            stream.next()
            namespace = None
        if stream.peek() == ('DELIM', '|'):
            stream.next()
            element = stream.next_ident_or_star()
        else:
            element = namespace
            namespace = None
    else:
        element = namespace = None
    result = Element(namespace, element)
    pseudo_element = None
    while 1:
        peek = stream.peek()
        if peek.type in ('S', 'EOF') or peek.is_delim(',', '+', '>', '~') or (
                inside_negation and peek == ('DELIM', ')')):
            break
        # Nothing may follow a pseudo-element within a compound selector.
        if pseudo_element:
            raise SelectorSyntaxError(
                'Got pseudo-element ::%s not at the end of a selector'
                % pseudo_element)
        if peek.type == 'HASH':
            result = Hash(result, stream.next().value)
        elif peek == ('DELIM', '.'):
            stream.next()
            result = Class(result, stream.next_ident())
        elif peek == ('DELIM', '['):
            stream.next()
            result = parse_attrib(result, stream)
        elif peek == ('DELIM', ':'):
            stream.next()
            if stream.peek() == ('DELIM', ':'):
                # '::name' or '::name(...)' — always a pseudo-element.
                stream.next()
                pseudo_element = stream.next_ident()
                if stream.peek() == ('DELIM', '('):
                    stream.next()
                    pseudo_element = FunctionalPseudoElement(
                        pseudo_element, parse_arguments(stream))
                continue
            ident = stream.next_ident()
            if ident.lower() in special_pseudo_elements:
                # Special case: CSS 2.1 pseudo-elements can have a single ':'
                # Any new pseudo-element must have two.
                pseudo_element = unicode_type(ident)
                continue
            if stream.peek() != ('DELIM', '('):
                # Plain pseudo-class with no arguments.
                result = Pseudo(result, ident)
                continue
            stream.next()
            stream.skip_whitespace()
            if ident.lower() == 'not':
                if inside_negation:
                    raise SelectorSyntaxError('Got nested :not()')
                argument, argument_pseudo_element = parse_simple_selector(
                    stream, inside_negation=True)
                next = stream.next()
                if argument_pseudo_element:
                    raise SelectorSyntaxError(
                        'Got pseudo-element ::%s inside :not() at %s'
                        % (argument_pseudo_element, next.pos))
                if next != ('DELIM', ')'):
                    raise SelectorSyntaxError("Expected ')', got %s" % (next,))
                result = Negation(result, argument)
            else:
                result = Function(result, ident, parse_arguments(stream))
        else:
            raise SelectorSyntaxError(
                "Expected selector, got %s" % (peek,))
    # If no token was consumed, there was no selector here at all.
    if len(stream.used) == selector_start:
        raise SelectorSyntaxError(
            "Expected selector, got %s" % (stream.peek(),))
    return result, pseudo_element
def parse_arguments(stream):
    """Collect the argument tokens of a functional pseudo-class up to ')'."""
    collected = []
    while True:
        stream.skip_whitespace()
        token = stream.next()
        if token == ('DELIM', ')'):
            return collected
        if token.type in ('IDENT', 'STRING', 'NUMBER') or token.is_delim('+', '-'):
            collected.append(token)
        else:
            raise SelectorSyntaxError(
                "Expected an argument, got %s" % (token,))
def parse_attrib(selector, stream):
    # Parse the inside of '[...]': optional namespace, attribute name,
    # optional operator and value. The opening '[' is already consumed.
    stream.skip_whitespace()
    attrib = stream.next_ident_or_star()
    if attrib is None and stream.peek() != ('DELIM', '|'):
        raise SelectorSyntaxError(
            "Expected '|', got %s" % (stream.peek(),))
    if stream.peek() == ('DELIM', '|'):
        stream.next()
        if stream.peek() == ('DELIM', '='):
            # '|=' operator: the ident before '|' was the attribute name.
            namespace = None
            stream.next()
            op = '|='
        else:
            # 'ns|attr': the ident before '|' was a namespace prefix.
            namespace = attrib
            attrib = stream.next_ident()
            op = None
    else:
        namespace = op = None
    if op is None:
        stream.skip_whitespace()
        next = stream.next()
        if next == ('DELIM', ']'):
            # Bare '[attr]' — existence test, no value.
            return Attrib(selector, namespace, attrib, 'exists', None)
        elif next == ('DELIM', '='):
            op = '='
        elif next.is_delim('^', '$', '*', '~', '|', '!') and (
                stream.peek() == ('DELIM', '=')):
            # Two-character operators: ^= $= *= ~= |= !=
            op = next.value + '='
            stream.next()
        else:
            raise SelectorSyntaxError(
                "Operator expected, got %s" % (next,))
    stream.skip_whitespace()
    value = stream.next()
    if value.type not in ('IDENT', 'STRING'):
        raise SelectorSyntaxError(
            "Expected string or ident, got %s" % (value,))
    stream.skip_whitespace()
    next = stream.next()
    if next != ('DELIM', ']'):
        raise SelectorSyntaxError(
            "Expected ']', got %s" % (next,))
    return Attrib(selector, namespace, attrib, op, value.value)
def parse_series(tokens):
    """
    Parse the ``an+b`` argument series of :nth-child() and friends.

    :param tokens: the argument tokens of the functional pseudo-class.
    :raises: ValueError if the series is malformed.
    :returns: the ``(a, b)`` integer pair.
    """
    for token in tokens:
        if token.type == 'STRING':
            raise ValueError('String tokens not allowed in series.')
    s = ''.join(token.value for token in tokens).strip()
    # Keyword and trivial forms first.
    if s == 'odd':
        return 2, 1
    if s == 'even':
        return 2, 0
    if s == 'n':
        return 1, 0
    if 'n' not in s:
        # Just the b term.
        return 0, int(s)
    a, b = s.split('n', 1)
    # A missing coefficient means 1; a bare sign means +1 / -1.
    if not a:
        a = 1
    elif a in ('-', '+'):
        a = int(a + '1')
    else:
        a = int(a)
    b = int(b) if b else 0
    return a, b
# Token objects
class Token(tuple):
    """A ``(type, value)`` pair that also records its position in the source."""

    def __new__(cls, type_, value, pos):
        self = tuple.__new__(cls, (type_, value))
        self.pos = pos
        return self

    def __repr__(self):
        return "<{0} '{1}' at {2:d}>".format(self.type, self.value, self.pos)

    def is_delim(self, *values):
        # True for a DELIM token whose value is one of *values*.
        return self[0] == 'DELIM' and self[1] in values

    # Named read-only access to the underlying tuple fields.
    type = property(operator.itemgetter(0))
    value = property(operator.itemgetter(1))
class EOFToken(Token):
    """Sentinel token marking the end of the selector source."""

    def __new__(cls, pos):
        # An EOF token always has type 'EOF' and no value.
        return Token.__new__(cls, 'EOF', None, pos)

    def __repr__(self):
        return '<{0} at {1:d}>'.format(self.type, self.pos)
# Tokenizer


class TokenMacros:
    # Regexp fragments (CSS core grammar macros) interpolated into the
    # token matchers below via `pattern % vars(TokenMacros)`.
    unicode_escape = r'\\([0-9a-f]{1,6})(?:\r\n|[ \n\r\t\f])?'
    escape = unicode_escape + r'|\\[^\n\r\f0-9a-f]'
    string_escape = r'\\(?:\n|\r\n|\r|\f)|' + escape
    nonascii = r'[^\0-\177]'
    nmchar = '[_a-z0-9-]|%s|%s' % (escape, nonascii)
    nmstart = '[_a-z]|%s|%s' % (escape, nonascii)
def _compile(pattern):
    # Expand the %(name)s placeholders from TokenMacros into *pattern* and
    # return the bound ``match`` method of the compiled, case-insensitive
    # regular expression.
    expanded = pattern % vars(TokenMacros)
    return re.compile(expanded, re.IGNORECASE).match
# Bound ``match`` functions for the individual token types.
_match_whitespace = _compile(r'[ \t\r\n\f]+')
_match_number = _compile(r'[+-]?(?:[0-9]*\.[0-9]+|[0-9]+)')
_match_hash = _compile('#(?:%(nmchar)s)+')
_match_ident = _compile('-?(?:%(nmstart)s)(?:%(nmchar)s)*')
# String bodies are matched starting just after the opening quote,
# selected by which quote character opened the string.
_match_string_by_quote = {
    "'": _compile(r"([^\n\r\f\\']|%(string_escape)s)*"),
    '"': _compile(r'([^\n\r\f\\"]|%(string_escape)s)*'),
}
# Bound ``sub`` functions used to resolve backslash escapes in token values.
_sub_simple_escape = re.compile(r'\\(.)').sub
_sub_unicode_escape = re.compile(TokenMacros.unicode_escape, re.I).sub
_sub_newline_escape =re.compile(r'\\(?:\n|\r\n|\r|\f)').sub
# Substitution callable equivalent to the replacement string r'\1', but
# faster on CPython: it simply returns the first capture group.
# operator.methodcaller exists on every supported Python (2.6+), so the
# historical hand-written fallback has been dropped.
_replace_simple = operator.methodcaller('group', 1)
def _replace_unicode(match):
    # Substitution callback for unicode escapes (\XXXXXX): decode the hex
    # digits captured in group 1, mapping out-of-range codepoints to U+FFFD
    # (the Unicode replacement character).
    cp = int(match.group(1), 16)
    return codepoint_to_chr(cp if cp <= sys.maxunicode else 0xFFFD)
def unescape_ident(value):
    # Resolve backslash escapes in an identifier: unicode escapes first,
    # then simple single-character escapes (same order as tokenize()).
    return _sub_simple_escape(
        _replace_simple, _sub_unicode_escape(_replace_unicode, value))
def tokenize(s):
    """Yield Token objects for the selector string *s*, finishing with a
    single EOFToken.  Raises SelectorSyntaxError on malformed strings."""
    pos = 0
    len_s = len(s)
    while pos < len_s:
        # A run of whitespace collapses into one 'S' token.
        match = _match_whitespace(s, pos=pos)
        if match:
            yield Token('S', ' ', pos)
            pos = match.end()
            continue
        match = _match_ident(s, pos=pos)
        if match:
            # Resolve unicode escapes first, then simple backslash escapes.
            value = _sub_simple_escape(_replace_simple,
                _sub_unicode_escape(_replace_unicode, match.group()))
            yield Token('IDENT', value, pos)
            pos = match.end()
            continue
        match = _match_hash(s, pos=pos)
        if match:
            # The leading '#' is dropped; the HASH value is the bare name.
            value = _sub_simple_escape(_replace_simple,
                _sub_unicode_escape(_replace_unicode, match.group()[1:]))
            yield Token('HASH', value, pos)
            pos = match.end()
            continue
        quote = s[pos]
        if quote in _match_string_by_quote:
            # Match the string body starting just after the opening quote.
            match = _match_string_by_quote[quote](s, pos=pos + 1)
            assert match, 'Should have found at least an empty match'
            end_pos = match.end()
            # The body pattern stops at the first character it cannot match;
            # that character must be the closing quote.
            if end_pos == len_s:
                raise SelectorSyntaxError('Unclosed string at %s' % pos)
            if s[end_pos] != quote:
                raise SelectorSyntaxError('Invalid string at %s' % pos)
            # Escaped newlines disappear entirely; then resolve the rest.
            value = _sub_simple_escape(_replace_simple,
                _sub_unicode_escape(_replace_unicode,
                    _sub_newline_escape('', match.group())))
            yield Token('STRING', value, pos)
            pos = end_pos + 1
            continue
        match = _match_number(s, pos=pos)
        if match:
            value = match.group()
            yield Token('NUMBER', value, pos)
            pos = match.end()
            continue
        pos2 = pos + 2
        if s[pos:pos2] == '/*':
            # Comments produce no token; an unterminated comment runs to EOF.
            pos = s.find('*/', pos2)
            if pos == -1:
                pos = len_s
            else:
                pos += 2
            continue
        # Any other single character becomes a DELIM token.
        yield Token('DELIM', s[pos], pos)
        pos += 1
    assert pos == len_s
    yield EOFToken(pos)
class TokenStream:
    """Iterator over tokens with one token of lookahead (``peek``).

    Consumed tokens accumulate in ``used`` so error reporting can show
    what has been parsed so far."""
    def __init__(self, tokens, source=None):
        self.used = []
        self.tokens = iter(tokens)
        self.source = source
        self.peeked = None  # the lookahead token, valid while _peeking is True
        self._peeking = False
        try:
            self.next_token = self.tokens.next
        except AttributeError:
            # Python 3
            self.next_token = self.tokens.__next__
    def next(self):
        # Consume the pending lookahead token if there is one,
        # otherwise advance the underlying iterator.
        if self._peeking:
            self._peeking = False
            self.used.append(self.peeked)
            return self.peeked
        else:
            next = self.next_token()
            self.used.append(next)
            return next
    def peek(self):
        # Look at the next token without consuming it; repeated calls
        # return the same token until next() is called.
        if not self._peeking:
            self.peeked = self.next_token()
            self._peeking = True
        return self.peeked
    def next_ident(self):
        # Consume the next token, which must be an IDENT; return its value.
        next = self.next()
        if next.type != 'IDENT':
            raise SelectorSyntaxError('Expected ident, got %s' % (next,))
        return next.value
    def next_ident_or_star(self):
        # Consume an IDENT (returning its value) or a '*' wildcard
        # (returning None); anything else is a syntax error.
        next = self.next()
        if next.type == 'IDENT':
            return next.value
        elif next == ('DELIM', '*'):
            return None
        else:
            raise SelectorSyntaxError(
                "Expected ident or '*', got %s" % (next,))
    def skip_whitespace(self):
        # Consume a single whitespace token, if one is next.
        peek = self.peek()
        if peek.type == 'S':
            self.next()
| 23,816 | Python | .py | 644 | 28.076087 | 94 | 0.528272 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,826 | core_name_map.py | kovidgoyal_calibre/src/qt/core_name_map.py | # autogenerated by __main__.py do not edit
name_map = {'PYQT_VERSION': 'PyQt6.QtCore',
'PYQT_VERSION_STR': 'PyQt6.QtCore',
'QAbstractAnimation': 'PyQt6.QtCore',
'QAbstractButton': 'PyQt6.QtWidgets',
'QAbstractEventDispatcher': 'PyQt6.QtCore',
'QAbstractFileIconProvider': 'PyQt6.QtGui',
'QAbstractGraphicsShapeItem': 'PyQt6.QtWidgets',
'QAbstractItemDelegate': 'PyQt6.QtWidgets',
'QAbstractItemModel': 'PyQt6.QtCore',
'QAbstractItemView': 'PyQt6.QtWidgets',
'QAbstractListModel': 'PyQt6.QtCore',
'QAbstractNativeEventFilter': 'PyQt6.QtCore',
'QAbstractNetworkCache': 'PyQt6.QtNetwork',
'QAbstractOpenGLFunctions': 'PyQt6.QtOpenGL',
'QAbstractPrintDialog': 'PyQt6.QtPrintSupport',
'QAbstractProxyModel': 'PyQt6.QtCore',
'QAbstractScrollArea': 'PyQt6.QtWidgets',
'QAbstractSlider': 'PyQt6.QtWidgets',
'QAbstractSocket': 'PyQt6.QtNetwork',
'QAbstractSpinBox': 'PyQt6.QtWidgets',
'QAbstractTableModel': 'PyQt6.QtCore',
'QAbstractTextDocumentLayout': 'PyQt6.QtGui',
'QAction': 'PyQt6.QtGui',
'QActionEvent': 'PyQt6.QtGui',
'QActionGroup': 'PyQt6.QtGui',
'QAnimationGroup': 'PyQt6.QtCore',
'QApplication': 'PyQt6.QtWidgets',
'QAudio': 'PyQt6.QtMultimedia',
'QAudioBuffer': 'PyQt6.QtMultimedia',
'QAudioDecoder': 'PyQt6.QtMultimedia',
'QAudioDevice': 'PyQt6.QtMultimedia',
'QAudioFormat': 'PyQt6.QtMultimedia',
'QAudioInput': 'PyQt6.QtMultimedia',
'QAudioOutput': 'PyQt6.QtMultimedia',
'QAudioSink': 'PyQt6.QtMultimedia',
'QAudioSource': 'PyQt6.QtMultimedia',
'QAuthenticator': 'PyQt6.QtNetwork',
'QBackingStore': 'PyQt6.QtGui',
'QBasicTimer': 'PyQt6.QtCore',
'QBitArray': 'PyQt6.QtCore',
'QBitmap': 'PyQt6.QtGui',
'QBluetoothPermission': 'PyQt6.QtCore',
'QBoxLayout': 'PyQt6.QtWidgets',
'QBrush': 'PyQt6.QtGui',
'QBuffer': 'PyQt6.QtCore',
'QButtonGroup': 'PyQt6.QtWidgets',
'QByteArray': 'PyQt6.QtCore',
'QByteArrayMatcher': 'PyQt6.QtCore',
'QCalendar': 'PyQt6.QtCore',
'QCalendarPermission': 'PyQt6.QtCore',
'QCalendarWidget': 'PyQt6.QtWidgets',
'QCamera': 'PyQt6.QtMultimedia',
'QCameraDevice': 'PyQt6.QtMultimedia',
'QCameraFormat': 'PyQt6.QtMultimedia',
'QCameraPermission': 'PyQt6.QtCore',
'QCapturableWindow': 'PyQt6.QtMultimedia',
'QCborError': 'PyQt6.QtCore',
'QCborKnownTags': 'PyQt6.QtCore',
'QCborSimpleType': 'PyQt6.QtCore',
'QCborStreamReader': 'PyQt6.QtCore',
'QCborStreamWriter': 'PyQt6.QtCore',
'QCheckBox': 'PyQt6.QtWidgets',
'QChildEvent': 'PyQt6.QtCore',
'QChildWindowEvent': 'PyQt6.QtGui',
'QClipboard': 'PyQt6.QtGui',
'QCloseEvent': 'PyQt6.QtGui',
'QCollator': 'PyQt6.QtCore',
'QCollatorSortKey': 'PyQt6.QtCore',
'QColor': 'PyQt6.QtGui',
'QColorConstants': 'PyQt6.QtGui',
'QColorDialog': 'PyQt6.QtWidgets',
'QColorSpace': 'PyQt6.QtGui',
'QColorTransform': 'PyQt6.QtGui',
'QColumnView': 'PyQt6.QtWidgets',
'QComboBox': 'PyQt6.QtWidgets',
'QCommandLineOption': 'PyQt6.QtCore',
'QCommandLineParser': 'PyQt6.QtCore',
'QCommandLinkButton': 'PyQt6.QtWidgets',
'QCommonStyle': 'PyQt6.QtWidgets',
'QCompleter': 'PyQt6.QtWidgets',
'QConcatenateTablesProxyModel': 'PyQt6.QtCore',
'QConicalGradient': 'PyQt6.QtGui',
'QContactsPermission': 'PyQt6.QtCore',
'QContextMenuEvent': 'PyQt6.QtGui',
'QCoreApplication': 'PyQt6.QtCore',
'QCryptographicHash': 'PyQt6.QtCore',
'QCursor': 'PyQt6.QtGui',
'QDataStream': 'PyQt6.QtCore',
'QDataWidgetMapper': 'PyQt6.QtWidgets',
'QDate': 'PyQt6.QtCore',
'QDateEdit': 'PyQt6.QtWidgets',
'QDateTime': 'PyQt6.QtCore',
'QDateTimeEdit': 'PyQt6.QtWidgets',
'QDeadlineTimer': 'PyQt6.QtCore',
'QDesktopServices': 'PyQt6.QtGui',
'QDial': 'PyQt6.QtWidgets',
'QDialog': 'PyQt6.QtWidgets',
'QDialogButtonBox': 'PyQt6.QtWidgets',
'QDir': 'PyQt6.QtCore',
'QDirIterator': 'PyQt6.QtCore',
'QDnsDomainNameRecord': 'PyQt6.QtNetwork',
'QDnsHostAddressRecord': 'PyQt6.QtNetwork',
'QDnsLookup': 'PyQt6.QtNetwork',
'QDnsMailExchangeRecord': 'PyQt6.QtNetwork',
'QDnsServiceRecord': 'PyQt6.QtNetwork',
'QDnsTextRecord': 'PyQt6.QtNetwork',
'QDockWidget': 'PyQt6.QtWidgets',
'QDoubleSpinBox': 'PyQt6.QtWidgets',
'QDoubleValidator': 'PyQt6.QtGui',
'QDrag': 'PyQt6.QtGui',
'QDragEnterEvent': 'PyQt6.QtGui',
'QDragLeaveEvent': 'PyQt6.QtGui',
'QDragMoveEvent': 'PyQt6.QtGui',
'QDropEvent': 'PyQt6.QtGui',
'QDynamicPropertyChangeEvent': 'PyQt6.QtCore',
'QEasingCurve': 'PyQt6.QtCore',
'QElapsedTimer': 'PyQt6.QtCore',
'QEnterEvent': 'PyQt6.QtGui',
'QErrorMessage': 'PyQt6.QtWidgets',
'QEvent': 'PyQt6.QtCore',
'QEventLoop': 'PyQt6.QtCore',
'QEventLoopLocker': 'PyQt6.QtCore',
'QEventPoint': 'PyQt6.QtGui',
'QExposeEvent': 'PyQt6.QtGui',
'QFile': 'PyQt6.QtCore',
'QFileDevice': 'PyQt6.QtCore',
'QFileDialog': 'PyQt6.QtWidgets',
'QFileIconProvider': 'PyQt6.QtWidgets',
'QFileInfo': 'PyQt6.QtCore',
'QFileOpenEvent': 'PyQt6.QtGui',
'QFileSelector': 'PyQt6.QtCore',
'QFileSystemModel': 'PyQt6.QtGui',
'QFileSystemWatcher': 'PyQt6.QtCore',
'QFocusEvent': 'PyQt6.QtGui',
'QFocusFrame': 'PyQt6.QtWidgets',
'QFont': 'PyQt6.QtGui',
'QFontComboBox': 'PyQt6.QtWidgets',
'QFontDatabase': 'PyQt6.QtGui',
'QFontDialog': 'PyQt6.QtWidgets',
'QFontInfo': 'PyQt6.QtGui',
'QFontMetrics': 'PyQt6.QtGui',
'QFontMetricsF': 'PyQt6.QtGui',
'QFormLayout': 'PyQt6.QtWidgets',
'QFrame': 'PyQt6.QtWidgets',
'QGenericArgument': 'PyQt6.QtCore',
'QGenericReturnArgument': 'PyQt6.QtCore',
'QGesture': 'PyQt6.QtWidgets',
'QGestureEvent': 'PyQt6.QtWidgets',
'QGestureRecognizer': 'PyQt6.QtWidgets',
'QGlyphRun': 'PyQt6.QtGui',
'QGradient': 'PyQt6.QtGui',
'QGraphicsAnchor': 'PyQt6.QtWidgets',
'QGraphicsAnchorLayout': 'PyQt6.QtWidgets',
'QGraphicsBlurEffect': 'PyQt6.QtWidgets',
'QGraphicsColorizeEffect': 'PyQt6.QtWidgets',
'QGraphicsDropShadowEffect': 'PyQt6.QtWidgets',
'QGraphicsEffect': 'PyQt6.QtWidgets',
'QGraphicsEllipseItem': 'PyQt6.QtWidgets',
'QGraphicsGridLayout': 'PyQt6.QtWidgets',
'QGraphicsItem': 'PyQt6.QtWidgets',
'QGraphicsItemGroup': 'PyQt6.QtWidgets',
'QGraphicsLayout': 'PyQt6.QtWidgets',
'QGraphicsLayoutItem': 'PyQt6.QtWidgets',
'QGraphicsLineItem': 'PyQt6.QtWidgets',
'QGraphicsLinearLayout': 'PyQt6.QtWidgets',
'QGraphicsObject': 'PyQt6.QtWidgets',
'QGraphicsOpacityEffect': 'PyQt6.QtWidgets',
'QGraphicsPathItem': 'PyQt6.QtWidgets',
'QGraphicsPixmapItem': 'PyQt6.QtWidgets',
'QGraphicsPolygonItem': 'PyQt6.QtWidgets',
'QGraphicsProxyWidget': 'PyQt6.QtWidgets',
'QGraphicsRectItem': 'PyQt6.QtWidgets',
'QGraphicsRotation': 'PyQt6.QtWidgets',
'QGraphicsScale': 'PyQt6.QtWidgets',
'QGraphicsScene': 'PyQt6.QtWidgets',
'QGraphicsSceneContextMenuEvent': 'PyQt6.QtWidgets',
'QGraphicsSceneDragDropEvent': 'PyQt6.QtWidgets',
'QGraphicsSceneEvent': 'PyQt6.QtWidgets',
'QGraphicsSceneHelpEvent': 'PyQt6.QtWidgets',
'QGraphicsSceneHoverEvent': 'PyQt6.QtWidgets',
'QGraphicsSceneMouseEvent': 'PyQt6.QtWidgets',
'QGraphicsSceneMoveEvent': 'PyQt6.QtWidgets',
'QGraphicsSceneResizeEvent': 'PyQt6.QtWidgets',
'QGraphicsSceneWheelEvent': 'PyQt6.QtWidgets',
'QGraphicsSimpleTextItem': 'PyQt6.QtWidgets',
'QGraphicsTextItem': 'PyQt6.QtWidgets',
'QGraphicsTransform': 'PyQt6.QtWidgets',
'QGraphicsVideoItem': 'PyQt6.QtMultimediaWidgets',
'QGraphicsView': 'PyQt6.QtWidgets',
'QGraphicsWidget': 'PyQt6.QtWidgets',
'QGridLayout': 'PyQt6.QtWidgets',
'QGroupBox': 'PyQt6.QtWidgets',
'QGuiApplication': 'PyQt6.QtGui',
'QHBoxLayout': 'PyQt6.QtWidgets',
'QHeaderView': 'PyQt6.QtWidgets',
'QHelpEvent': 'PyQt6.QtGui',
'QHideEvent': 'PyQt6.QtGui',
'QHostAddress': 'PyQt6.QtNetwork',
'QHostInfo': 'PyQt6.QtNetwork',
'QHoverEvent': 'PyQt6.QtGui',
'QHstsPolicy': 'PyQt6.QtNetwork',
'QHttp1Configuration': 'PyQt6.QtNetwork',
'QHttp2Configuration': 'PyQt6.QtNetwork',
'QHttpHeaders': 'PyQt6.QtNetwork',
'QHttpMultiPart': 'PyQt6.QtNetwork',
'QHttpPart': 'PyQt6.QtNetwork',
'QIODevice': 'PyQt6.QtCore',
'QIODeviceBase': 'PyQt6.QtCore',
'QIcon': 'PyQt6.QtGui',
'QIconDragEvent': 'PyQt6.QtGui',
'QIconEngine': 'PyQt6.QtGui',
'QIdentityProxyModel': 'PyQt6.QtCore',
'QImage': 'PyQt6.QtGui',
'QImageCapture': 'PyQt6.QtMultimedia',
'QImageIOHandler': 'PyQt6.QtGui',
'QImageReader': 'PyQt6.QtGui',
'QImageWriter': 'PyQt6.QtGui',
'QInputDevice': 'PyQt6.QtGui',
'QInputDialog': 'PyQt6.QtWidgets',
'QInputEvent': 'PyQt6.QtGui',
'QInputMethod': 'PyQt6.QtGui',
'QInputMethodEvent': 'PyQt6.QtGui',
'QInputMethodQueryEvent': 'PyQt6.QtGui',
'QIntValidator': 'PyQt6.QtGui',
'QItemDelegate': 'PyQt6.QtWidgets',
'QItemEditorCreatorBase': 'PyQt6.QtWidgets',
'QItemEditorFactory': 'PyQt6.QtWidgets',
'QItemSelection': 'PyQt6.QtCore',
'QItemSelectionModel': 'PyQt6.QtCore',
'QItemSelectionRange': 'PyQt6.QtCore',
'QJsonDocument': 'PyQt6.QtCore',
'QJsonParseError': 'PyQt6.QtCore',
'QJsonValue': 'PyQt6.QtCore',
'QKeyCombination': 'PyQt6.QtCore',
'QKeyEvent': 'PyQt6.QtGui',
'QKeySequence': 'PyQt6.QtGui',
'QKeySequenceEdit': 'PyQt6.QtWidgets',
'QLCDNumber': 'PyQt6.QtWidgets',
'QLabel': 'PyQt6.QtWidgets',
'QLayout': 'PyQt6.QtWidgets',
'QLayoutItem': 'PyQt6.QtWidgets',
'QLibrary': 'PyQt6.QtCore',
'QLibraryInfo': 'PyQt6.QtCore',
'QLine': 'PyQt6.QtCore',
'QLineEdit': 'PyQt6.QtWidgets',
'QLineF': 'PyQt6.QtCore',
'QLinearGradient': 'PyQt6.QtGui',
'QListView': 'PyQt6.QtWidgets',
'QListWidget': 'PyQt6.QtWidgets',
'QListWidgetItem': 'PyQt6.QtWidgets',
'QLocalServer': 'PyQt6.QtNetwork',
'QLocalSocket': 'PyQt6.QtNetwork',
'QLocale': 'PyQt6.QtCore',
'QLocationPermission': 'PyQt6.QtCore',
'QLockFile': 'PyQt6.QtCore',
'QLoggingCategory': 'PyQt6.QtCore',
'QMainWindow': 'PyQt6.QtWidgets',
'QMargins': 'PyQt6.QtCore',
'QMarginsF': 'PyQt6.QtCore',
'QMatrix2x2': 'PyQt6.QtGui',
'QMatrix2x3': 'PyQt6.QtGui',
'QMatrix2x4': 'PyQt6.QtGui',
'QMatrix3x2': 'PyQt6.QtGui',
'QMatrix3x3': 'PyQt6.QtGui',
'QMatrix3x4': 'PyQt6.QtGui',
'QMatrix4x2': 'PyQt6.QtGui',
'QMatrix4x3': 'PyQt6.QtGui',
'QMatrix4x4': 'PyQt6.QtGui',
'QMdiArea': 'PyQt6.QtWidgets',
'QMdiSubWindow': 'PyQt6.QtWidgets',
'QMediaCaptureSession': 'PyQt6.QtMultimedia',
'QMediaDevices': 'PyQt6.QtMultimedia',
'QMediaFormat': 'PyQt6.QtMultimedia',
'QMediaMetaData': 'PyQt6.QtMultimedia',
'QMediaPlayer': 'PyQt6.QtMultimedia',
'QMediaRecorder': 'PyQt6.QtMultimedia',
'QMediaTimeRange': 'PyQt6.QtMultimedia',
'QMenu': 'PyQt6.QtWidgets',
'QMenuBar': 'PyQt6.QtWidgets',
'QMessageAuthenticationCode': 'PyQt6.QtCore',
'QMessageBox': 'PyQt6.QtWidgets',
'QMessageLogContext': 'PyQt6.QtCore',
'QMessageLogger': 'PyQt6.QtCore',
'QMetaClassInfo': 'PyQt6.QtCore',
'QMetaEnum': 'PyQt6.QtCore',
'QMetaMethod': 'PyQt6.QtCore',
'QMetaObject': 'PyQt6.QtCore',
'QMetaProperty': 'PyQt6.QtCore',
'QMetaType': 'PyQt6.QtCore',
'QMicrophonePermission': 'PyQt6.QtCore',
'QMimeData': 'PyQt6.QtCore',
'QMimeDatabase': 'PyQt6.QtCore',
'QMimeType': 'PyQt6.QtCore',
'QModelIndex': 'PyQt6.QtCore',
'QModelRoleData': 'PyQt6.QtCore',
'QModelRoleDataSpan': 'PyQt6.QtCore',
'QMouseEvent': 'PyQt6.QtGui',
'QMoveEvent': 'PyQt6.QtGui',
'QMovie': 'PyQt6.QtGui',
'QMutex': 'PyQt6.QtCore',
'QMutexLocker': 'PyQt6.QtCore',
'QNativeGestureEvent': 'PyQt6.QtGui',
'QNativeInterface': 'PyQt6.QtQuick',
'QNativeIpcKey': 'PyQt6.QtCore',
'QNetworkAccessManager': 'PyQt6.QtNetwork',
'QNetworkAddressEntry': 'PyQt6.QtNetwork',
'QNetworkCacheMetaData': 'PyQt6.QtNetwork',
'QNetworkCookie': 'PyQt6.QtNetwork',
'QNetworkCookieJar': 'PyQt6.QtNetwork',
'QNetworkDatagram': 'PyQt6.QtNetwork',
'QNetworkDiskCache': 'PyQt6.QtNetwork',
'QNetworkInformation': 'PyQt6.QtNetwork',
'QNetworkInterface': 'PyQt6.QtNetwork',
'QNetworkProxy': 'PyQt6.QtNetwork',
'QNetworkProxyFactory': 'PyQt6.QtNetwork',
'QNetworkProxyQuery': 'PyQt6.QtNetwork',
'QNetworkReply': 'PyQt6.QtNetwork',
'QNetworkRequest': 'PyQt6.QtNetwork',
'QObject': 'PyQt6.QtCore',
'QObjectCleanupHandler': 'PyQt6.QtCore',
'QOcspCertificateStatus': 'PyQt6.QtNetwork',
'QOcspResponse': 'PyQt6.QtNetwork',
'QOcspRevocationReason': 'PyQt6.QtNetwork',
'QOffscreenSurface': 'PyQt6.QtGui',
'QOpenGLBuffer': 'PyQt6.QtOpenGL',
'QOpenGLContext': 'PyQt6.QtGui',
'QOpenGLContextGroup': 'PyQt6.QtGui',
'QOpenGLDebugLogger': 'PyQt6.QtOpenGL',
'QOpenGLDebugMessage': 'PyQt6.QtOpenGL',
'QOpenGLFramebufferObject': 'PyQt6.QtOpenGL',
'QOpenGLFramebufferObjectFormat': 'PyQt6.QtOpenGL',
'QOpenGLFunctions_2_0': 'PyQt6.QtOpenGL',
'QOpenGLFunctions_2_1': 'PyQt6.QtOpenGL',
'QOpenGLFunctions_4_1_Core': 'PyQt6.QtOpenGL',
'QOpenGLPaintDevice': 'PyQt6.QtOpenGL',
'QOpenGLPixelTransferOptions': 'PyQt6.QtOpenGL',
'QOpenGLShader': 'PyQt6.QtOpenGL',
'QOpenGLShaderProgram': 'PyQt6.QtOpenGL',
'QOpenGLTexture': 'PyQt6.QtOpenGL',
'QOpenGLTextureBlitter': 'PyQt6.QtOpenGL',
'QOpenGLTimeMonitor': 'PyQt6.QtOpenGL',
'QOpenGLTimerQuery': 'PyQt6.QtOpenGL',
'QOpenGLVersionFunctionsFactory': 'PyQt6.QtOpenGL',
'QOpenGLVersionProfile': 'PyQt6.QtOpenGL',
'QOpenGLVertexArrayObject': 'PyQt6.QtOpenGL',
'QOpenGLWidget': 'PyQt6.QtOpenGLWidgets',
'QOpenGLWindow': 'PyQt6.QtOpenGL',
'QOperatingSystemVersion': 'PyQt6.QtCore',
'QOperatingSystemVersionBase': 'PyQt6.QtCore',
'QPageLayout': 'PyQt6.QtGui',
'QPageRanges': 'PyQt6.QtGui',
'QPageSetupDialog': 'PyQt6.QtPrintSupport',
'QPageSize': 'PyQt6.QtGui',
'QPagedPaintDevice': 'PyQt6.QtGui',
'QPaintDevice': 'PyQt6.QtGui',
'QPaintDeviceWindow': 'PyQt6.QtGui',
'QPaintEngine': 'PyQt6.QtGui',
'QPaintEngineState': 'PyQt6.QtGui',
'QPaintEvent': 'PyQt6.QtGui',
'QPainter': 'PyQt6.QtGui',
'QPainterPath': 'PyQt6.QtGui',
'QPainterPathStroker': 'PyQt6.QtGui',
'QPalette': 'PyQt6.QtGui',
'QPanGesture': 'PyQt6.QtWidgets',
'QParallelAnimationGroup': 'PyQt6.QtCore',
'QPasswordDigestor': 'PyQt6.QtNetwork',
'QPauseAnimation': 'PyQt6.QtCore',
'QPdfWriter': 'PyQt6.QtGui',
'QPen': 'PyQt6.QtGui',
'QPermission': 'PyQt6.QtCore',
'QPersistentModelIndex': 'PyQt6.QtCore',
'QPicture': 'PyQt6.QtGui',
'QPinchGesture': 'PyQt6.QtWidgets',
'QPixelFormat': 'PyQt6.QtGui',
'QPixmap': 'PyQt6.QtGui',
'QPixmapCache': 'PyQt6.QtGui',
'QPlainTextDocumentLayout': 'PyQt6.QtWidgets',
'QPlainTextEdit': 'PyQt6.QtWidgets',
'QPlatformSurfaceEvent': 'PyQt6.QtGui',
'QPluginLoader': 'PyQt6.QtCore',
'QPoint': 'PyQt6.QtCore',
'QPointF': 'PyQt6.QtCore',
'QPointerEvent': 'PyQt6.QtGui',
'QPointingDevice': 'PyQt6.QtGui',
'QPointingDeviceUniqueId': 'PyQt6.QtGui',
'QPolygon': 'PyQt6.QtGui',
'QPolygonF': 'PyQt6.QtGui',
'QPrintDialog': 'PyQt6.QtPrintSupport',
'QPrintEngine': 'PyQt6.QtPrintSupport',
'QPrintPreviewDialog': 'PyQt6.QtPrintSupport',
'QPrintPreviewWidget': 'PyQt6.QtPrintSupport',
'QPrinter': 'PyQt6.QtPrintSupport',
'QPrinterInfo': 'PyQt6.QtPrintSupport',
'QProcess': 'PyQt6.QtCore',
'QProcessEnvironment': 'PyQt6.QtCore',
'QProgressBar': 'PyQt6.QtWidgets',
'QProgressDialog': 'PyQt6.QtWidgets',
'QPropertyAnimation': 'PyQt6.QtCore',
'QProxyStyle': 'PyQt6.QtWidgets',
'QPushButton': 'PyQt6.QtWidgets',
'QQuaternion': 'PyQt6.QtGui',
'QQuickAsyncImageProvider': 'PyQt6.QtQuick',
'QQuickCloseEvent': 'PyQt6.QtQuick',
'QQuickFramebufferObject': 'PyQt6.QtQuick',
'QQuickGraphicsConfiguration': 'PyQt6.QtQuick',
'QQuickGraphicsDevice': 'PyQt6.QtQuick',
'QQuickImageProvider': 'PyQt6.QtQuick',
'QQuickImageResponse': 'PyQt6.QtQuick',
'QQuickItem': 'PyQt6.QtQuick',
'QQuickItemGrabResult': 'PyQt6.QtQuick',
'QQuickPaintedItem': 'PyQt6.QtQuick',
'QQuickRenderControl': 'PyQt6.QtQuick',
'QQuickRenderTarget': 'PyQt6.QtQuick',
'QQuickTextDocument': 'PyQt6.QtQuick',
'QQuickTextureFactory': 'PyQt6.QtQuick',
'QQuickView': 'PyQt6.QtQuick',
'QQuickWindow': 'PyQt6.QtQuick',
'QRadialGradient': 'PyQt6.QtGui',
'QRadioButton': 'PyQt6.QtWidgets',
'QRandomGenerator': 'PyQt6.QtCore',
'QRasterWindow': 'PyQt6.QtGui',
'QRawFont': 'PyQt6.QtGui',
'QReadLocker': 'PyQt6.QtCore',
'QReadWriteLock': 'PyQt6.QtCore',
'QRect': 'PyQt6.QtCore',
'QRectF': 'PyQt6.QtCore',
'QRecursiveMutex': 'PyQt6.QtCore',
'QRegion': 'PyQt6.QtGui',
'QRegularExpression': 'PyQt6.QtCore',
'QRegularExpressionMatch': 'PyQt6.QtCore',
'QRegularExpressionMatchIterator': 'PyQt6.QtCore',
'QRegularExpressionValidator': 'PyQt6.QtGui',
'QResizeEvent': 'PyQt6.QtGui',
'QResource': 'PyQt6.QtCore',
'QRgba64': 'PyQt6.QtGui',
'QRubberBand': 'PyQt6.QtWidgets',
'QRunnable': 'PyQt6.QtCore',
'QSGBasicGeometryNode': 'PyQt6.QtQuick',
'QSGClipNode': 'PyQt6.QtQuick',
'QSGDynamicTexture': 'PyQt6.QtQuick',
'QSGFlatColorMaterial': 'PyQt6.QtQuick',
'QSGGeometry': 'PyQt6.QtQuick',
'QSGGeometryNode': 'PyQt6.QtQuick',
'QSGImageNode': 'PyQt6.QtQuick',
'QSGMaterial': 'PyQt6.QtQuick',
'QSGMaterialShader': 'PyQt6.QtQuick',
'QSGMaterialType': 'PyQt6.QtQuick',
'QSGNode': 'PyQt6.QtQuick',
'QSGOpacityNode': 'PyQt6.QtQuick',
'QSGOpaqueTextureMaterial': 'PyQt6.QtQuick',
'QSGRectangleNode': 'PyQt6.QtQuick',
'QSGRenderNode': 'PyQt6.QtQuick',
'QSGRendererInterface': 'PyQt6.QtQuick',
'QSGSimpleRectNode': 'PyQt6.QtQuick',
'QSGSimpleTextureNode': 'PyQt6.QtQuick',
'QSGTextNode': 'PyQt6.QtQuick',
'QSGTexture': 'PyQt6.QtQuick',
'QSGTextureMaterial': 'PyQt6.QtQuick',
'QSGTextureProvider': 'PyQt6.QtQuick',
'QSGTransformNode': 'PyQt6.QtQuick',
'QSGVertexColorMaterial': 'PyQt6.QtQuick',
'QSaveFile': 'PyQt6.QtCore',
'QScreen': 'PyQt6.QtGui',
'QScreenCapture': 'PyQt6.QtMultimedia',
'QScrollArea': 'PyQt6.QtWidgets',
'QScrollBar': 'PyQt6.QtWidgets',
'QScrollEvent': 'PyQt6.QtGui',
'QScrollPrepareEvent': 'PyQt6.QtGui',
'QScroller': 'PyQt6.QtWidgets',
'QScrollerProperties': 'PyQt6.QtWidgets',
'QSemaphore': 'PyQt6.QtCore',
'QSemaphoreReleaser': 'PyQt6.QtCore',
'QSequentialAnimationGroup': 'PyQt6.QtCore',
'QSessionManager': 'PyQt6.QtGui',
'QSettings': 'PyQt6.QtCore',
'QSharedMemory': 'PyQt6.QtCore',
'QShortcut': 'PyQt6.QtGui',
'QShortcutEvent': 'PyQt6.QtGui',
'QShowEvent': 'PyQt6.QtGui',
'QSignalBlocker': 'PyQt6.QtCore',
'QSignalMapper': 'PyQt6.QtCore',
'QSinglePointEvent': 'PyQt6.QtGui',
'QSize': 'PyQt6.QtCore',
'QSizeF': 'PyQt6.QtCore',
'QSizeGrip': 'PyQt6.QtWidgets',
'QSizePolicy': 'PyQt6.QtWidgets',
'QSlider': 'PyQt6.QtWidgets',
'QSocketNotifier': 'PyQt6.QtCore',
'QSortFilterProxyModel': 'PyQt6.QtCore',
'QSoundEffect': 'PyQt6.QtMultimedia',
'QSpacerItem': 'PyQt6.QtWidgets',
'QSpinBox': 'PyQt6.QtWidgets',
'QSplashScreen': 'PyQt6.QtWidgets',
'QSplitter': 'PyQt6.QtWidgets',
'QSplitterHandle': 'PyQt6.QtWidgets',
'QSsl': 'PyQt6.QtNetwork',
'QSslCertificate': 'PyQt6.QtNetwork',
'QSslCertificateExtension': 'PyQt6.QtNetwork',
'QSslCipher': 'PyQt6.QtNetwork',
'QSslConfiguration': 'PyQt6.QtNetwork',
'QSslDiffieHellmanParameters': 'PyQt6.QtNetwork',
'QSslEllipticCurve': 'PyQt6.QtNetwork',
'QSslError': 'PyQt6.QtNetwork',
'QSslKey': 'PyQt6.QtNetwork',
'QSslPreSharedKeyAuthenticator': 'PyQt6.QtNetwork',
'QSslServer': 'PyQt6.QtNetwork',
'QSslSocket': 'PyQt6.QtNetwork',
'QStackedLayout': 'PyQt6.QtWidgets',
'QStackedWidget': 'PyQt6.QtWidgets',
'QStandardItem': 'PyQt6.QtGui',
'QStandardItemModel': 'PyQt6.QtGui',
'QStandardPaths': 'PyQt6.QtCore',
'QStaticText': 'PyQt6.QtGui',
'QStatusBar': 'PyQt6.QtWidgets',
'QStatusTipEvent': 'PyQt6.QtGui',
'QStorageInfo': 'PyQt6.QtCore',
'QStringConverter': 'PyQt6.QtCore',
'QStringConverterBase': 'PyQt6.QtCore',
'QStringDecoder': 'PyQt6.QtCore',
'QStringEncoder': 'PyQt6.QtCore',
'QStringListModel': 'PyQt6.QtCore',
'QStyle': 'PyQt6.QtWidgets',
'QStyleFactory': 'PyQt6.QtWidgets',
'QStyleHintReturn': 'PyQt6.QtWidgets',
'QStyleHintReturnMask': 'PyQt6.QtWidgets',
'QStyleHintReturnVariant': 'PyQt6.QtWidgets',
'QStyleHints': 'PyQt6.QtGui',
'QStyleOption': 'PyQt6.QtWidgets',
'QStyleOptionButton': 'PyQt6.QtWidgets',
'QStyleOptionComboBox': 'PyQt6.QtWidgets',
'QStyleOptionComplex': 'PyQt6.QtWidgets',
'QStyleOptionDockWidget': 'PyQt6.QtWidgets',
'QStyleOptionFocusRect': 'PyQt6.QtWidgets',
'QStyleOptionFrame': 'PyQt6.QtWidgets',
'QStyleOptionGraphicsItem': 'PyQt6.QtWidgets',
'QStyleOptionGroupBox': 'PyQt6.QtWidgets',
'QStyleOptionHeader': 'PyQt6.QtWidgets',
'QStyleOptionHeaderV2': 'PyQt6.QtWidgets',
'QStyleOptionMenuItem': 'PyQt6.QtWidgets',
'QStyleOptionProgressBar': 'PyQt6.QtWidgets',
'QStyleOptionRubberBand': 'PyQt6.QtWidgets',
'QStyleOptionSizeGrip': 'PyQt6.QtWidgets',
'QStyleOptionSlider': 'PyQt6.QtWidgets',
'QStyleOptionSpinBox': 'PyQt6.QtWidgets',
'QStyleOptionTab': 'PyQt6.QtWidgets',
'QStyleOptionTabBarBase': 'PyQt6.QtWidgets',
'QStyleOptionTabWidgetFrame': 'PyQt6.QtWidgets',
'QStyleOptionTitleBar': 'PyQt6.QtWidgets',
'QStyleOptionToolBar': 'PyQt6.QtWidgets',
'QStyleOptionToolBox': 'PyQt6.QtWidgets',
'QStyleOptionToolButton': 'PyQt6.QtWidgets',
'QStyleOptionViewItem': 'PyQt6.QtWidgets',
'QStylePainter': 'PyQt6.QtWidgets',
'QStyledItemDelegate': 'PyQt6.QtWidgets',
'QSurface': 'PyQt6.QtGui',
'QSurfaceFormat': 'PyQt6.QtGui',
'QSvgGenerator': 'PyQt6.QtSvg',
'QSvgRenderer': 'PyQt6.QtSvg',
'QSwipeGesture': 'PyQt6.QtWidgets',
'QSyntaxHighlighter': 'PyQt6.QtGui',
'QSysInfo': 'PyQt6.QtCore',
'QSystemSemaphore': 'PyQt6.QtCore',
'QSystemTrayIcon': 'PyQt6.QtWidgets',
'QT_TRANSLATE_NOOP': 'PyQt6.QtCore',
'QT_TR_NOOP': 'PyQt6.QtCore',
'QT_VERSION': 'PyQt6.QtCore',
'QT_VERSION_STR': 'PyQt6.QtCore',
'QTabBar': 'PyQt6.QtWidgets',
'QTabWidget': 'PyQt6.QtWidgets',
'QTableView': 'PyQt6.QtWidgets',
'QTableWidget': 'PyQt6.QtWidgets',
'QTableWidgetItem': 'PyQt6.QtWidgets',
'QTableWidgetSelectionRange': 'PyQt6.QtWidgets',
'QTabletEvent': 'PyQt6.QtGui',
'QTapAndHoldGesture': 'PyQt6.QtWidgets',
'QTapGesture': 'PyQt6.QtWidgets',
'QTcpServer': 'PyQt6.QtNetwork',
'QTcpSocket': 'PyQt6.QtNetwork',
'QTemporaryDir': 'PyQt6.QtCore',
'QTemporaryFile': 'PyQt6.QtCore',
'QTextBlock': 'PyQt6.QtGui',
'QTextBlockFormat': 'PyQt6.QtGui',
'QTextBlockGroup': 'PyQt6.QtGui',
'QTextBlockUserData': 'PyQt6.QtGui',
'QTextBoundaryFinder': 'PyQt6.QtCore',
'QTextBrowser': 'PyQt6.QtWidgets',
'QTextCharFormat': 'PyQt6.QtGui',
'QTextCursor': 'PyQt6.QtGui',
'QTextDocument': 'PyQt6.QtGui',
'QTextDocumentFragment': 'PyQt6.QtGui',
'QTextDocumentWriter': 'PyQt6.QtGui',
'QTextEdit': 'PyQt6.QtWidgets',
'QTextFormat': 'PyQt6.QtGui',
'QTextFragment': 'PyQt6.QtGui',
'QTextFrame': 'PyQt6.QtGui',
'QTextFrameFormat': 'PyQt6.QtGui',
'QTextImageFormat': 'PyQt6.QtGui',
'QTextInlineObject': 'PyQt6.QtGui',
'QTextItem': 'PyQt6.QtGui',
'QTextLayout': 'PyQt6.QtGui',
'QTextLength': 'PyQt6.QtGui',
'QTextLine': 'PyQt6.QtGui',
'QTextList': 'PyQt6.QtGui',
'QTextListFormat': 'PyQt6.QtGui',
'QTextObject': 'PyQt6.QtGui',
'QTextObjectInterface': 'PyQt6.QtGui',
'QTextOption': 'PyQt6.QtGui',
'QTextStream': 'PyQt6.QtCore',
'QTextStreamManipulator': 'PyQt6.QtCore',
'QTextTable': 'PyQt6.QtGui',
'QTextTableCell': 'PyQt6.QtGui',
'QTextTableCellFormat': 'PyQt6.QtGui',
'QTextTableFormat': 'PyQt6.QtGui',
'QTextToSpeech': 'PyQt6.QtTextToSpeech',
'QThread': 'PyQt6.QtCore',
'QThreadPool': 'PyQt6.QtCore',
'QTime': 'PyQt6.QtCore',
'QTimeEdit': 'PyQt6.QtWidgets',
'QTimeLine': 'PyQt6.QtCore',
'QTimeZone': 'PyQt6.QtCore',
'QTimer': 'PyQt6.QtCore',
'QTimerEvent': 'PyQt6.QtCore',
'QToolBar': 'PyQt6.QtWidgets',
'QToolBox': 'PyQt6.QtWidgets',
'QToolButton': 'PyQt6.QtWidgets',
'QToolTip': 'PyQt6.QtWidgets',
'QTouchEvent': 'PyQt6.QtGui',
'QTransform': 'PyQt6.QtGui',
'QTranslator': 'PyQt6.QtCore',
'QTransposeProxyModel': 'PyQt6.QtCore',
'QTreeView': 'PyQt6.QtWidgets',
'QTreeWidget': 'PyQt6.QtWidgets',
'QTreeWidgetItem': 'PyQt6.QtWidgets',
'QTreeWidgetItemIterator': 'PyQt6.QtWidgets',
'QTypeRevision': 'PyQt6.QtCore',
'QUdpSocket': 'PyQt6.QtNetwork',
'QUndoCommand': 'PyQt6.QtGui',
'QUndoGroup': 'PyQt6.QtGui',
'QUndoStack': 'PyQt6.QtGui',
'QUndoView': 'PyQt6.QtWidgets',
'QUrl': 'PyQt6.QtCore',
'QUrlQuery': 'PyQt6.QtCore',
'QUuid': 'PyQt6.QtCore',
'QVBoxLayout': 'PyQt6.QtWidgets',
'QValidator': 'PyQt6.QtGui',
'QVariant': 'PyQt6.QtCore',
'QVariantAnimation': 'PyQt6.QtCore',
'QVector2D': 'PyQt6.QtGui',
'QVector3D': 'PyQt6.QtGui',
'QVector4D': 'PyQt6.QtGui',
'QVersionNumber': 'PyQt6.QtCore',
'QVideoFrame': 'PyQt6.QtMultimedia',
'QVideoFrameFormat': 'PyQt6.QtMultimedia',
'QVideoSink': 'PyQt6.QtMultimedia',
'QVideoWidget': 'PyQt6.QtMultimediaWidgets',
'QVoice': 'PyQt6.QtTextToSpeech',
'QWIDGETSIZE_MAX': 'PyQt6.QtWidgets',
'QWaitCondition': 'PyQt6.QtCore',
'QWhatsThis': 'PyQt6.QtWidgets',
'QWhatsThisClickedEvent': 'PyQt6.QtGui',
'QWheelEvent': 'PyQt6.QtGui',
'QWidget': 'PyQt6.QtWidgets',
'QWidgetAction': 'PyQt6.QtWidgets',
'QWidgetItem': 'PyQt6.QtWidgets',
'QWindow': 'PyQt6.QtGui',
'QWindowCapture': 'PyQt6.QtMultimedia',
'QWindowStateChangeEvent': 'PyQt6.QtGui',
'QWizard': 'PyQt6.QtWidgets',
'QWizardPage': 'PyQt6.QtWidgets',
'QWriteLocker': 'PyQt6.QtCore',
'QXmlStreamAttribute': 'PyQt6.QtCore',
'QXmlStreamAttributes': 'PyQt6.QtCore',
'QXmlStreamEntityDeclaration': 'PyQt6.QtCore',
'QXmlStreamEntityResolver': 'PyQt6.QtCore',
'QXmlStreamNamespaceDeclaration': 'PyQt6.QtCore',
'QXmlStreamNotationDeclaration': 'PyQt6.QtCore',
'QXmlStreamReader': 'PyQt6.QtCore',
'QXmlStreamWriter': 'PyQt6.QtCore',
'Q_ARG': 'PyQt6.QtCore',
'Q_RETURN_ARG': 'PyQt6.QtCore',
'Qt': 'PyQt6.QtCore',
'QtCore': 'PyQt6.QtCore',
'QtGui': 'PyQt6.QtGui',
'QtMsgType': 'PyQt6.QtCore',
'QtMultimedia': 'PyQt6.QtMultimedia',
'QtMultimediaWidgets': 'PyQt6.QtMultimediaWidgets',
'QtNetwork': 'PyQt6.QtNetwork',
'QtOpenGL': 'PyQt6.QtOpenGL',
'QtOpenGLWidgets': 'PyQt6.QtOpenGLWidgets',
'QtPrintSupport': 'PyQt6.QtPrintSupport',
'QtQuick': 'PyQt6.QtQuick',
'QtSvg': 'PyQt6.QtSvg',
'QtTextToSpeech': 'PyQt6.QtTextToSpeech',
'QtVideo': 'PyQt6.QtMultimedia',
'QtWidgets': 'PyQt6.QtWidgets',
'pyqtBoundSignal': 'PyQt6.QtCore',
'pyqtClassInfo': 'PyQt6.QtCore',
'pyqtEnum': 'PyQt6.QtCore',
'pyqtPickleProtocol': 'PyQt6.QtCore',
'pyqtProperty': 'PyQt6.QtCore',
'pyqtRemoveInputHook': 'PyQt6.QtCore',
'pyqtRestoreInputHook': 'PyQt6.QtCore',
'pyqtSetPickleProtocol': 'PyQt6.QtCore',
'pyqtSignal': 'PyQt6.QtCore',
'pyqtSlot': 'PyQt6.QtCore',
'qAbs': 'PyQt6.QtCore',
'qAddPostRoutine': 'PyQt6.QtCore',
'qAddPreRoutine': 'PyQt6.QtCore',
'qAlpha': 'PyQt6.QtGui',
'qBlue': 'PyQt6.QtGui',
'qChecksum': 'PyQt6.QtCore',
'qCompress': 'PyQt6.QtCore',
'qCritical': 'PyQt6.QtCore',
'qDebug': 'PyQt6.QtCore',
'qDrawBorderPixmap': 'PyQt6.QtWidgets',
'qDrawPlainRect': 'PyQt6.QtWidgets',
'qDrawPlainRoundedRect': 'PyQt6.QtWidgets',
'qDrawShadeLine': 'PyQt6.QtWidgets',
'qDrawShadePanel': 'PyQt6.QtWidgets',
'qDrawShadeRect': 'PyQt6.QtWidgets',
'qDrawWinButton': 'PyQt6.QtWidgets',
'qDrawWinPanel': 'PyQt6.QtWidgets',
'qEnvironmentVariable': 'PyQt6.QtCore',
'qEnvironmentVariableIntValue': 'PyQt6.QtCore',
'qEnvironmentVariableIsEmpty': 'PyQt6.QtCore',
'qEnvironmentVariableIsSet': 'PyQt6.QtCore',
'qFatal': 'PyQt6.QtCore',
'qFloatDistance': 'PyQt6.QtCore',
'qFormatLogMessage': 'PyQt6.QtCore',
'qFuzzyCompare': 'PyQt6.QtCore',
'qFuzzyIsNull': 'PyQt6.QtCore',
'qGray': 'PyQt6.QtGui',
'qGreen': 'PyQt6.QtGui',
'qInf': 'PyQt6.QtCore',
'qInfo': 'PyQt6.QtCore',
'qInstallMessageHandler': 'PyQt6.QtCore',
'qIsFinite': 'PyQt6.QtCore',
'qIsInf': 'PyQt6.QtCore',
'qIsNaN': 'PyQt6.QtCore',
'qPixelFormatAlpha': 'PyQt6.QtGui',
'qPixelFormatCmyk': 'PyQt6.QtGui',
'qPixelFormatGrayscale': 'PyQt6.QtGui',
'qPixelFormatHsl': 'PyQt6.QtGui',
'qPixelFormatHsv': 'PyQt6.QtGui',
'qPixelFormatRgba': 'PyQt6.QtGui',
'qPixelFormatYuv': 'PyQt6.QtGui',
'qPremultiply': 'PyQt6.QtGui',
'qQNaN': 'PyQt6.QtCore',
'qRed': 'PyQt6.QtGui',
'qRegisterResourceData': 'PyQt6.QtCore',
'qRemovePostRoutine': 'PyQt6.QtCore',
'qRgb': 'PyQt6.QtGui',
'qRgba': 'PyQt6.QtGui',
'qRgba64': 'PyQt6.QtGui',
'qRound': 'PyQt6.QtCore',
'qRound64': 'PyQt6.QtCore',
'qSNaN': 'PyQt6.QtCore',
'qSetFieldWidth': 'PyQt6.QtCore',
'qSetMessagePattern': 'PyQt6.QtCore',
'qSetPadChar': 'PyQt6.QtCore',
'qSetRealNumberPrecision': 'PyQt6.QtCore',
'qUncompress': 'PyQt6.QtCore',
'qUnpremultiply': 'PyQt6.QtGui',
'qUnregisterResourceData': 'PyQt6.QtCore',
'qVersion': 'PyQt6.QtCore',
'qWarning': 'PyQt6.QtCore',
'qYieldCpu': 'PyQt6.QtCore',
'qt_set_sequence_auto_mnemonic': 'PyQt6.QtGui',
'sip': 'PyQt6.sip'}
module_names = frozenset(('QtCore',
'QtGui',
'QtWidgets',
'QtNetwork',
'QtSvg',
'QtPrintSupport',
'QtOpenGL',
'QtOpenGLWidgets',
'QtQuick',
'QtMultimedia',
'QtMultimediaWidgets',
'QtTextToSpeech',
'sip')
)
| 28,190 | Python | .py | 784 | 33.961735 | 53 | 0.743852 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,827 | __main__.py | kovidgoyal_calibre/src/qt/__main__.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPL v3 Copyright: 2021, Kovid Goyal <kovid at kovidgoyal.net>
import importlib
import os
from pprint import pprint
# The Qt binding whose modules are introspected and re-exported.
QT_WRAPPER = 'PyQt6'
# Directory of this package; the generated wrapper files are written here.
base = os.path.dirname(os.path.abspath(__file__))
# Qt modules to scan, grouped by the wrapper module that is generated for
# each group (qt/core.py, qt/webengine.py, qt/dbus.py).
module_lists = {
    'core': (
        'QtCore',
        'QtGui',
        'QtWidgets',
        'QtNetwork',
        'QtSvg',
        'QtPrintSupport',
        'QtOpenGL',
        'QtOpenGLWidgets',
        'QtQuick',
        'QtMultimedia',
        'QtMultimediaWidgets',
        'QtTextToSpeech',
    ),
    'webengine': (
        'QtWebEngineCore',
        'QtWebEngineWidgets',
    ),
    'dbus': (
        'QtDBus',
    )
}
def scan(name):
    '''Generate the lazy wrapper files for the module group *name*.

    Imports every Qt module listed in ``module_lists[name]``, collects their
    public names, and writes three files next to this script:

    * ``{name}.py`` — a module whose PEP 562 ``__getattr__`` resolves names
      lazily through ``loader.dynamic_load``
    * ``{name}.pyi`` — a stub listing every collected name for type checkers
    * ``{name}_name_map.py`` — the name -> owning-module map the loader uses
    '''
    module_names = module_lists[name]
    name_map = {}
    types = []  # lines accumulated for the .pyi stub
    for mod_name in module_names:
        mod = importlib.import_module(f'{QT_WRAPPER}.{mod_name}')
        # Map the short module name (e.g. QtCore) to its full import path.
        full_name = name_map[mod_name] = mod.__name__
        types.append(f'import {full_name}')
        for obj_name in sorted(dir(mod)):
            # Skip private names; on duplicates the first module wins.
            if not obj_name.startswith('_') and obj_name not in name_map:
                name_map[obj_name] = full_name
                types.append(f'{obj_name} = {full_name}.{obj_name}')
    # The lazy wrapper module: all state lives in the name map plus two caches.
    with open(f'{base}/{name}.py', 'w') as f:
        print('# autogenerated by __main__.py do not edit', file=f)
        print(f'from .{name}_name_map import module_names, name_map', file=f)
        print('''from .loader import dynamic_load
already_imported = {}
qt_modules = {}


def __getattr__(name):
    return dynamic_load(name, name_map, already_imported, qt_modules, module_names)
''', file=f)
    with open(f'{base}/{name}.pyi', 'w') as f:
        print('# autogenerated by __main__.py do not edit', file=f)
        f.write('\n'.join(types))
    if name == 'core':
        # sip is reachable through the core wrapper but gets no stub entries:
        # it is added only after the .py/.pyi files have been written.
        module_names += ('sip',)
        mod = importlib.import_module(f'{QT_WRAPPER}.sip')
        name_map['sip'] = mod.__name__
    with open(f'{base}/{name}_name_map.py', 'w') as f:
        print('# autogenerated by __main__.py do not edit', file=f)
        print('name_map =', end=' ', file=f)
        pprint(name_map, stream=f)
        print('module_names = frozenset(', end='', file=f)
        pprint(module_names, stream=f)
        print(')', file=f)
# Accumulate every wrapped Qt module name while generating the wrapper,
# stub and name-map files for each group defined in module_lists.
top_level_module_names = ()
for name in module_lists.keys():
    top_level_module_names += module_lists[name]
    scan(name)
# Finally generate the package __init__, whose PEP 562 __getattr__ makes
# ``qt.QtCore``-style attribute access import the real PyQt module lazily.
with open(f'{base}/__init__.py', 'w') as f:
    print('# autogenerated by __main__.py do not edit', file=f)
    print(f'{top_level_module_names=}', file=f)
    print(f'''
def __getattr__(name):
    if name in top_level_module_names:
        import importlib
        return importlib.import_module("{QT_WRAPPER}." + name)
    raise AttributeError(name)
''', file=f)
| 2,720 | Python | .py | 80 | 27.725 | 83 | 0.592395 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,828 | dbus.pyi | kovidgoyal_calibre/src/qt/dbus.pyi | # autogenerated by __main__.py do not edit
import PyQt6.QtDBus
QDBus = PyQt6.QtDBus.QDBus
QDBusAbstractAdaptor = PyQt6.QtDBus.QDBusAbstractAdaptor
QDBusAbstractInterface = PyQt6.QtDBus.QDBusAbstractInterface
QDBusArgument = PyQt6.QtDBus.QDBusArgument
QDBusConnection = PyQt6.QtDBus.QDBusConnection
QDBusConnectionInterface = PyQt6.QtDBus.QDBusConnectionInterface
QDBusError = PyQt6.QtDBus.QDBusError
QDBusInterface = PyQt6.QtDBus.QDBusInterface
QDBusMessage = PyQt6.QtDBus.QDBusMessage
QDBusObjectPath = PyQt6.QtDBus.QDBusObjectPath
QDBusPendingCall = PyQt6.QtDBus.QDBusPendingCall
QDBusPendingCallWatcher = PyQt6.QtDBus.QDBusPendingCallWatcher
QDBusPendingReply = PyQt6.QtDBus.QDBusPendingReply
QDBusReply = PyQt6.QtDBus.QDBusReply
QDBusServiceWatcher = PyQt6.QtDBus.QDBusServiceWatcher
QDBusSignature = PyQt6.QtDBus.QDBusSignature
QDBusUnixFileDescriptor = PyQt6.QtDBus.QDBusUnixFileDescriptor
QDBusVariant = PyQt6.QtDBus.QDBusVariant | 936 | Python | .py | 20 | 45.85 | 64 | 0.886587 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,829 | webengine_name_map.py | kovidgoyal_calibre/src/qt/webengine_name_map.py | # autogenerated by __main__.py do not edit
name_map = {'PYQT_WEBENGINE_VERSION': 'PyQt6.QtWebEngineCore',
'PYQT_WEBENGINE_VERSION_STR': 'PyQt6.QtWebEngineCore',
'QWebEngineCertificateError': 'PyQt6.QtWebEngineCore',
'QWebEngineClientCertificateSelection': 'PyQt6.QtWebEngineCore',
'QWebEngineClientCertificateStore': 'PyQt6.QtWebEngineCore',
'QWebEngineContextMenuRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineCookieStore': 'PyQt6.QtWebEngineCore',
'QWebEngineDesktopMediaRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineDownloadRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineFileSystemAccessRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineFindTextResult': 'PyQt6.QtWebEngineCore',
'QWebEngineFullScreenRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineGlobalSettings': 'PyQt6.QtWebEngineCore',
'QWebEngineHistory': 'PyQt6.QtWebEngineCore',
'QWebEngineHistoryItem': 'PyQt6.QtWebEngineCore',
'QWebEngineHistoryModel': 'PyQt6.QtWebEngineCore',
'QWebEngineHttpRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineLoadingInfo': 'PyQt6.QtWebEngineCore',
'QWebEngineNavigationRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineNewWindowRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineNotification': 'PyQt6.QtWebEngineCore',
'QWebEnginePage': 'PyQt6.QtWebEngineCore',
'QWebEngineProfile': 'PyQt6.QtWebEngineCore',
'QWebEngineQuotaRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineRegisterProtocolHandlerRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineScript': 'PyQt6.QtWebEngineCore',
'QWebEngineScriptCollection': 'PyQt6.QtWebEngineCore',
'QWebEngineSettings': 'PyQt6.QtWebEngineCore',
'QWebEngineUrlRequestInfo': 'PyQt6.QtWebEngineCore',
'QWebEngineUrlRequestInterceptor': 'PyQt6.QtWebEngineCore',
'QWebEngineUrlRequestJob': 'PyQt6.QtWebEngineCore',
'QWebEngineUrlScheme': 'PyQt6.QtWebEngineCore',
'QWebEngineUrlSchemeHandler': 'PyQt6.QtWebEngineCore',
'QWebEngineView': 'PyQt6.QtWebEngineWidgets',
'QWebEngineWebAuthPinRequest': 'PyQt6.QtWebEngineCore',
'QWebEngineWebAuthUxRequest': 'PyQt6.QtWebEngineCore',
'QtWebEngineCore': 'PyQt6.QtWebEngineCore',
'QtWebEngineWidgets': 'PyQt6.QtWebEngineWidgets',
'qWebEngineChromiumSecurityPatchVersion': 'PyQt6.QtWebEngineCore',
'qWebEngineChromiumVersion': 'PyQt6.QtWebEngineCore',
'qWebEngineGetDomainAndRegistry': 'PyQt6.QtWebEngineCore',
'qWebEngineProcessName': 'PyQt6.QtWebEngineCore',
'qWebEngineVersion': 'PyQt6.QtWebEngineCore'}
module_names = frozenset(('QtWebEngineCore', 'QtWebEngineWidgets')
)
| 2,456 | Python | .py | 46 | 51.478261 | 69 | 0.824896 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,830 | dbus.py | kovidgoyal_calibre/src/qt/dbus.py | # autogenerated by __main__.py do not edit
from .dbus_name_map import module_names, name_map
from .loader import dynamic_load

# Per-module caches consumed by dynamic_load: resolved names and the
# already-imported Qt modules.  NOTE: this file is autogenerated by
# __main__.py — regenerate rather than hand-edit.
already_imported = {}
qt_modules = {}


def __getattr__(name):
    # PEP 562 module-level __getattr__: resolve Qt D-Bus names lazily.
    return dynamic_load(name, name_map, already_imported, qt_modules, module_names)
| 273 | Python | .py | 7 | 37.142857 | 83 | 0.746212 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,831 | dbus_name_map.py | kovidgoyal_calibre/src/qt/dbus_name_map.py | # autogenerated by __main__.py do not edit
name_map = {'QDBus': 'PyQt6.QtDBus',
'QDBusAbstractAdaptor': 'PyQt6.QtDBus',
'QDBusAbstractInterface': 'PyQt6.QtDBus',
'QDBusArgument': 'PyQt6.QtDBus',
'QDBusConnection': 'PyQt6.QtDBus',
'QDBusConnectionInterface': 'PyQt6.QtDBus',
'QDBusError': 'PyQt6.QtDBus',
'QDBusInterface': 'PyQt6.QtDBus',
'QDBusMessage': 'PyQt6.QtDBus',
'QDBusObjectPath': 'PyQt6.QtDBus',
'QDBusPendingCall': 'PyQt6.QtDBus',
'QDBusPendingCallWatcher': 'PyQt6.QtDBus',
'QDBusPendingReply': 'PyQt6.QtDBus',
'QDBusReply': 'PyQt6.QtDBus',
'QDBusServiceWatcher': 'PyQt6.QtDBus',
'QDBusSignature': 'PyQt6.QtDBus',
'QDBusUnixFileDescriptor': 'PyQt6.QtDBus',
'QDBusVariant': 'PyQt6.QtDBus',
'QtDBus': 'PyQt6.QtDBus'}
module_names = frozenset(('QtDBus',)
)
| 782 | Python | .py | 22 | 33.727273 | 44 | 0.739474 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,832 | loader.py | kovidgoyal_calibre/src/qt/loader.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPL v3 Copyright: 2021, Kovid Goyal <kovid at kovidgoyal.net>
import sys
from importlib import import_module
def dynamic_load(name, name_map, already_imported, qt_modules, module_names=()):
    '''Resolve *name* to a Qt object or module, importing lazily.

    A name resolved on an earlier call is returned straight from the
    *already_imported* cache.  Otherwise the owning module recorded in
    *name_map* is imported at most once and cached in *qt_modules* (a failed
    import is stored as ``False`` so it is reported once and never retried).
    Names listed in *module_names* resolve to the module object itself;
    anything else is looked up as an attribute of the module.  Raises
    AttributeError when the name cannot be resolved.
    '''
    try:
        # Fast path: served from the cache without touching importlib.
        return already_imported[name]
    except KeyError:
        pass
    mod_name = name_map.get(name)
    if mod_name is not None:
        mod = qt_modules.get(mod_name)
        if mod is None:
            try:
                mod = import_module(mod_name)
            except ImportError as err:
                # Record the failure (as False) so the broken import is only
                # attempted, and its error printed, a single time.
                mod = False
                print(
                    'Failed to import PyQt module:',
                    mod_name,
                    'with error:',
                    err,
                    file=sys.stderr
                )
            qt_modules[mod_name] = mod
        if mod is not False:
            if name in module_names:
                already_imported[name] = mod
                return mod
            missing = object()
            obj = getattr(mod, name, missing)
            if obj is not missing:
                already_imported[name] = obj
                return obj
    raise AttributeError(f'The object {name} is not a known Qt object')
| 1,218 | Python | .py | 33 | 25.515152 | 80 | 0.545685 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,833 | core.pyi | kovidgoyal_calibre/src/qt/core.pyi | # autogenerated by __main__.py do not edit
import PyQt6.QtCore
PYQT_VERSION = PyQt6.QtCore.PYQT_VERSION
PYQT_VERSION_STR = PyQt6.QtCore.PYQT_VERSION_STR
QAbstractAnimation = PyQt6.QtCore.QAbstractAnimation
QAbstractEventDispatcher = PyQt6.QtCore.QAbstractEventDispatcher
QAbstractItemModel = PyQt6.QtCore.QAbstractItemModel
QAbstractListModel = PyQt6.QtCore.QAbstractListModel
QAbstractNativeEventFilter = PyQt6.QtCore.QAbstractNativeEventFilter
QAbstractProxyModel = PyQt6.QtCore.QAbstractProxyModel
QAbstractTableModel = PyQt6.QtCore.QAbstractTableModel
QAnimationGroup = PyQt6.QtCore.QAnimationGroup
QBasicTimer = PyQt6.QtCore.QBasicTimer
QBitArray = PyQt6.QtCore.QBitArray
QBluetoothPermission = PyQt6.QtCore.QBluetoothPermission
QBuffer = PyQt6.QtCore.QBuffer
QByteArray = PyQt6.QtCore.QByteArray
QByteArrayMatcher = PyQt6.QtCore.QByteArrayMatcher
QCalendar = PyQt6.QtCore.QCalendar
QCalendarPermission = PyQt6.QtCore.QCalendarPermission
QCameraPermission = PyQt6.QtCore.QCameraPermission
QCborError = PyQt6.QtCore.QCborError
QCborKnownTags = PyQt6.QtCore.QCborKnownTags
QCborSimpleType = PyQt6.QtCore.QCborSimpleType
QCborStreamReader = PyQt6.QtCore.QCborStreamReader
QCborStreamWriter = PyQt6.QtCore.QCborStreamWriter
QChildEvent = PyQt6.QtCore.QChildEvent
QCollator = PyQt6.QtCore.QCollator
QCollatorSortKey = PyQt6.QtCore.QCollatorSortKey
QCommandLineOption = PyQt6.QtCore.QCommandLineOption
QCommandLineParser = PyQt6.QtCore.QCommandLineParser
QConcatenateTablesProxyModel = PyQt6.QtCore.QConcatenateTablesProxyModel
QContactsPermission = PyQt6.QtCore.QContactsPermission
QCoreApplication = PyQt6.QtCore.QCoreApplication
QCryptographicHash = PyQt6.QtCore.QCryptographicHash
QDataStream = PyQt6.QtCore.QDataStream
QDate = PyQt6.QtCore.QDate
QDateTime = PyQt6.QtCore.QDateTime
QDeadlineTimer = PyQt6.QtCore.QDeadlineTimer
QDir = PyQt6.QtCore.QDir
QDirIterator = PyQt6.QtCore.QDirIterator
QDynamicPropertyChangeEvent = PyQt6.QtCore.QDynamicPropertyChangeEvent
QEasingCurve = PyQt6.QtCore.QEasingCurve
QElapsedTimer = PyQt6.QtCore.QElapsedTimer
QEvent = PyQt6.QtCore.QEvent
QEventLoop = PyQt6.QtCore.QEventLoop
QEventLoopLocker = PyQt6.QtCore.QEventLoopLocker
QFile = PyQt6.QtCore.QFile
QFileDevice = PyQt6.QtCore.QFileDevice
QFileInfo = PyQt6.QtCore.QFileInfo
QFileSelector = PyQt6.QtCore.QFileSelector
QFileSystemWatcher = PyQt6.QtCore.QFileSystemWatcher
QGenericArgument = PyQt6.QtCore.QGenericArgument
QGenericReturnArgument = PyQt6.QtCore.QGenericReturnArgument
QIODevice = PyQt6.QtCore.QIODevice
QIODeviceBase = PyQt6.QtCore.QIODeviceBase
QIdentityProxyModel = PyQt6.QtCore.QIdentityProxyModel
QItemSelection = PyQt6.QtCore.QItemSelection
QItemSelectionModel = PyQt6.QtCore.QItemSelectionModel
QItemSelectionRange = PyQt6.QtCore.QItemSelectionRange
QJsonDocument = PyQt6.QtCore.QJsonDocument
QJsonParseError = PyQt6.QtCore.QJsonParseError
QJsonValue = PyQt6.QtCore.QJsonValue
QKeyCombination = PyQt6.QtCore.QKeyCombination
QLibrary = PyQt6.QtCore.QLibrary
QLibraryInfo = PyQt6.QtCore.QLibraryInfo
QLine = PyQt6.QtCore.QLine
QLineF = PyQt6.QtCore.QLineF
QLocale = PyQt6.QtCore.QLocale
QLocationPermission = PyQt6.QtCore.QLocationPermission
QLockFile = PyQt6.QtCore.QLockFile
QLoggingCategory = PyQt6.QtCore.QLoggingCategory
QMargins = PyQt6.QtCore.QMargins
QMarginsF = PyQt6.QtCore.QMarginsF
QMessageAuthenticationCode = PyQt6.QtCore.QMessageAuthenticationCode
QMessageLogContext = PyQt6.QtCore.QMessageLogContext
QMessageLogger = PyQt6.QtCore.QMessageLogger
QMetaClassInfo = PyQt6.QtCore.QMetaClassInfo
QMetaEnum = PyQt6.QtCore.QMetaEnum
QMetaMethod = PyQt6.QtCore.QMetaMethod
QMetaObject = PyQt6.QtCore.QMetaObject
QMetaProperty = PyQt6.QtCore.QMetaProperty
QMetaType = PyQt6.QtCore.QMetaType
QMicrophonePermission = PyQt6.QtCore.QMicrophonePermission
QMimeData = PyQt6.QtCore.QMimeData
QMimeDatabase = PyQt6.QtCore.QMimeDatabase
QMimeType = PyQt6.QtCore.QMimeType
QModelIndex = PyQt6.QtCore.QModelIndex
QModelRoleData = PyQt6.QtCore.QModelRoleData
QModelRoleDataSpan = PyQt6.QtCore.QModelRoleDataSpan
QMutex = PyQt6.QtCore.QMutex
QMutexLocker = PyQt6.QtCore.QMutexLocker
QNativeIpcKey = PyQt6.QtCore.QNativeIpcKey
QObject = PyQt6.QtCore.QObject
QObjectCleanupHandler = PyQt6.QtCore.QObjectCleanupHandler
QOperatingSystemVersion = PyQt6.QtCore.QOperatingSystemVersion
QOperatingSystemVersionBase = PyQt6.QtCore.QOperatingSystemVersionBase
QParallelAnimationGroup = PyQt6.QtCore.QParallelAnimationGroup
QPauseAnimation = PyQt6.QtCore.QPauseAnimation
QPermission = PyQt6.QtCore.QPermission
QPersistentModelIndex = PyQt6.QtCore.QPersistentModelIndex
QPluginLoader = PyQt6.QtCore.QPluginLoader
QPoint = PyQt6.QtCore.QPoint
QPointF = PyQt6.QtCore.QPointF
QProcess = PyQt6.QtCore.QProcess
QProcessEnvironment = PyQt6.QtCore.QProcessEnvironment
QPropertyAnimation = PyQt6.QtCore.QPropertyAnimation
QRandomGenerator = PyQt6.QtCore.QRandomGenerator
QReadLocker = PyQt6.QtCore.QReadLocker
QReadWriteLock = PyQt6.QtCore.QReadWriteLock
QRect = PyQt6.QtCore.QRect
QRectF = PyQt6.QtCore.QRectF
QRecursiveMutex = PyQt6.QtCore.QRecursiveMutex
QRegularExpression = PyQt6.QtCore.QRegularExpression
QRegularExpressionMatch = PyQt6.QtCore.QRegularExpressionMatch
QRegularExpressionMatchIterator = PyQt6.QtCore.QRegularExpressionMatchIterator
QResource = PyQt6.QtCore.QResource
QRunnable = PyQt6.QtCore.QRunnable
QSaveFile = PyQt6.QtCore.QSaveFile
QSemaphore = PyQt6.QtCore.QSemaphore
QSemaphoreReleaser = PyQt6.QtCore.QSemaphoreReleaser
QSequentialAnimationGroup = PyQt6.QtCore.QSequentialAnimationGroup
QSettings = PyQt6.QtCore.QSettings
QSharedMemory = PyQt6.QtCore.QSharedMemory
QSignalBlocker = PyQt6.QtCore.QSignalBlocker
QSignalMapper = PyQt6.QtCore.QSignalMapper
QSize = PyQt6.QtCore.QSize
QSizeF = PyQt6.QtCore.QSizeF
QSocketNotifier = PyQt6.QtCore.QSocketNotifier
QSortFilterProxyModel = PyQt6.QtCore.QSortFilterProxyModel
QStandardPaths = PyQt6.QtCore.QStandardPaths
QStorageInfo = PyQt6.QtCore.QStorageInfo
QStringConverter = PyQt6.QtCore.QStringConverter
QStringConverterBase = PyQt6.QtCore.QStringConverterBase
QStringDecoder = PyQt6.QtCore.QStringDecoder
QStringEncoder = PyQt6.QtCore.QStringEncoder
QStringListModel = PyQt6.QtCore.QStringListModel
QSysInfo = PyQt6.QtCore.QSysInfo
QSystemSemaphore = PyQt6.QtCore.QSystemSemaphore
QT_TRANSLATE_NOOP = PyQt6.QtCore.QT_TRANSLATE_NOOP
QT_TR_NOOP = PyQt6.QtCore.QT_TR_NOOP
QT_VERSION = PyQt6.QtCore.QT_VERSION
QT_VERSION_STR = PyQt6.QtCore.QT_VERSION_STR
QTemporaryDir = PyQt6.QtCore.QTemporaryDir
QTemporaryFile = PyQt6.QtCore.QTemporaryFile
QTextBoundaryFinder = PyQt6.QtCore.QTextBoundaryFinder
QTextStream = PyQt6.QtCore.QTextStream
QTextStreamManipulator = PyQt6.QtCore.QTextStreamManipulator
QThread = PyQt6.QtCore.QThread
QThreadPool = PyQt6.QtCore.QThreadPool
QTime = PyQt6.QtCore.QTime
QTimeLine = PyQt6.QtCore.QTimeLine
QTimeZone = PyQt6.QtCore.QTimeZone
QTimer = PyQt6.QtCore.QTimer
QTimerEvent = PyQt6.QtCore.QTimerEvent
QTranslator = PyQt6.QtCore.QTranslator
QTransposeProxyModel = PyQt6.QtCore.QTransposeProxyModel
QTypeRevision = PyQt6.QtCore.QTypeRevision
QUrl = PyQt6.QtCore.QUrl
QUrlQuery = PyQt6.QtCore.QUrlQuery
QUuid = PyQt6.QtCore.QUuid
QVariant = PyQt6.QtCore.QVariant
QVariantAnimation = PyQt6.QtCore.QVariantAnimation
QVersionNumber = PyQt6.QtCore.QVersionNumber
QWaitCondition = PyQt6.QtCore.QWaitCondition
QWriteLocker = PyQt6.QtCore.QWriteLocker
QXmlStreamAttribute = PyQt6.QtCore.QXmlStreamAttribute
QXmlStreamAttributes = PyQt6.QtCore.QXmlStreamAttributes
QXmlStreamEntityDeclaration = PyQt6.QtCore.QXmlStreamEntityDeclaration
QXmlStreamEntityResolver = PyQt6.QtCore.QXmlStreamEntityResolver
QXmlStreamNamespaceDeclaration = PyQt6.QtCore.QXmlStreamNamespaceDeclaration
QXmlStreamNotationDeclaration = PyQt6.QtCore.QXmlStreamNotationDeclaration
QXmlStreamReader = PyQt6.QtCore.QXmlStreamReader
QXmlStreamWriter = PyQt6.QtCore.QXmlStreamWriter
Q_ARG = PyQt6.QtCore.Q_ARG
Q_RETURN_ARG = PyQt6.QtCore.Q_RETURN_ARG
Qt = PyQt6.QtCore.Qt
QtMsgType = PyQt6.QtCore.QtMsgType
pyqtBoundSignal = PyQt6.QtCore.pyqtBoundSignal
pyqtClassInfo = PyQt6.QtCore.pyqtClassInfo
pyqtEnum = PyQt6.QtCore.pyqtEnum
pyqtPickleProtocol = PyQt6.QtCore.pyqtPickleProtocol
pyqtProperty = PyQt6.QtCore.pyqtProperty
pyqtRemoveInputHook = PyQt6.QtCore.pyqtRemoveInputHook
pyqtRestoreInputHook = PyQt6.QtCore.pyqtRestoreInputHook
pyqtSetPickleProtocol = PyQt6.QtCore.pyqtSetPickleProtocol
pyqtSignal = PyQt6.QtCore.pyqtSignal
pyqtSlot = PyQt6.QtCore.pyqtSlot
qAbs = PyQt6.QtCore.qAbs
qAddPostRoutine = PyQt6.QtCore.qAddPostRoutine
qAddPreRoutine = PyQt6.QtCore.qAddPreRoutine
qChecksum = PyQt6.QtCore.qChecksum
qCompress = PyQt6.QtCore.qCompress
qCritical = PyQt6.QtCore.qCritical
qDebug = PyQt6.QtCore.qDebug
qEnvironmentVariable = PyQt6.QtCore.qEnvironmentVariable
qEnvironmentVariableIntValue = PyQt6.QtCore.qEnvironmentVariableIntValue
qEnvironmentVariableIsEmpty = PyQt6.QtCore.qEnvironmentVariableIsEmpty
qEnvironmentVariableIsSet = PyQt6.QtCore.qEnvironmentVariableIsSet
qFatal = PyQt6.QtCore.qFatal
qFloatDistance = PyQt6.QtCore.qFloatDistance
qFormatLogMessage = PyQt6.QtCore.qFormatLogMessage
qFuzzyCompare = PyQt6.QtCore.qFuzzyCompare
qFuzzyIsNull = PyQt6.QtCore.qFuzzyIsNull
qInf = PyQt6.QtCore.qInf
qInfo = PyQt6.QtCore.qInfo
qInstallMessageHandler = PyQt6.QtCore.qInstallMessageHandler
qIsFinite = PyQt6.QtCore.qIsFinite
qIsInf = PyQt6.QtCore.qIsInf
qIsNaN = PyQt6.QtCore.qIsNaN
qQNaN = PyQt6.QtCore.qQNaN
qRegisterResourceData = PyQt6.QtCore.qRegisterResourceData
qRemovePostRoutine = PyQt6.QtCore.qRemovePostRoutine
qRound = PyQt6.QtCore.qRound
qRound64 = PyQt6.QtCore.qRound64
qSNaN = PyQt6.QtCore.qSNaN
qSetFieldWidth = PyQt6.QtCore.qSetFieldWidth
qSetMessagePattern = PyQt6.QtCore.qSetMessagePattern
qSetPadChar = PyQt6.QtCore.qSetPadChar
qSetRealNumberPrecision = PyQt6.QtCore.qSetRealNumberPrecision
qUncompress = PyQt6.QtCore.qUncompress
qUnregisterResourceData = PyQt6.QtCore.qUnregisterResourceData
qVersion = PyQt6.QtCore.qVersion
qWarning = PyQt6.QtCore.qWarning
qYieldCpu = PyQt6.QtCore.qYieldCpu
import PyQt6.QtGui
QAbstractFileIconProvider = PyQt6.QtGui.QAbstractFileIconProvider
QAbstractTextDocumentLayout = PyQt6.QtGui.QAbstractTextDocumentLayout
QAction = PyQt6.QtGui.QAction
QActionEvent = PyQt6.QtGui.QActionEvent
QActionGroup = PyQt6.QtGui.QActionGroup
QBackingStore = PyQt6.QtGui.QBackingStore
QBitmap = PyQt6.QtGui.QBitmap
QBrush = PyQt6.QtGui.QBrush
QChildWindowEvent = PyQt6.QtGui.QChildWindowEvent
QClipboard = PyQt6.QtGui.QClipboard
QCloseEvent = PyQt6.QtGui.QCloseEvent
QColor = PyQt6.QtGui.QColor
QColorConstants = PyQt6.QtGui.QColorConstants
QColorSpace = PyQt6.QtGui.QColorSpace
QColorTransform = PyQt6.QtGui.QColorTransform
QConicalGradient = PyQt6.QtGui.QConicalGradient
QContextMenuEvent = PyQt6.QtGui.QContextMenuEvent
QCursor = PyQt6.QtGui.QCursor
QDesktopServices = PyQt6.QtGui.QDesktopServices
QDoubleValidator = PyQt6.QtGui.QDoubleValidator
QDrag = PyQt6.QtGui.QDrag
QDragEnterEvent = PyQt6.QtGui.QDragEnterEvent
QDragLeaveEvent = PyQt6.QtGui.QDragLeaveEvent
QDragMoveEvent = PyQt6.QtGui.QDragMoveEvent
QDropEvent = PyQt6.QtGui.QDropEvent
QEnterEvent = PyQt6.QtGui.QEnterEvent
QEventPoint = PyQt6.QtGui.QEventPoint
QExposeEvent = PyQt6.QtGui.QExposeEvent
QFileOpenEvent = PyQt6.QtGui.QFileOpenEvent
QFileSystemModel = PyQt6.QtGui.QFileSystemModel
QFocusEvent = PyQt6.QtGui.QFocusEvent
QFont = PyQt6.QtGui.QFont
QFontDatabase = PyQt6.QtGui.QFontDatabase
QFontInfo = PyQt6.QtGui.QFontInfo
QFontMetrics = PyQt6.QtGui.QFontMetrics
QFontMetricsF = PyQt6.QtGui.QFontMetricsF
QGlyphRun = PyQt6.QtGui.QGlyphRun
QGradient = PyQt6.QtGui.QGradient
QGuiApplication = PyQt6.QtGui.QGuiApplication
QHelpEvent = PyQt6.QtGui.QHelpEvent
QHideEvent = PyQt6.QtGui.QHideEvent
QHoverEvent = PyQt6.QtGui.QHoverEvent
QIcon = PyQt6.QtGui.QIcon
QIconDragEvent = PyQt6.QtGui.QIconDragEvent
QIconEngine = PyQt6.QtGui.QIconEngine
QImage = PyQt6.QtGui.QImage
QImageIOHandler = PyQt6.QtGui.QImageIOHandler
QImageReader = PyQt6.QtGui.QImageReader
QImageWriter = PyQt6.QtGui.QImageWriter
QInputDevice = PyQt6.QtGui.QInputDevice
QInputEvent = PyQt6.QtGui.QInputEvent
QInputMethod = PyQt6.QtGui.QInputMethod
QInputMethodEvent = PyQt6.QtGui.QInputMethodEvent
QInputMethodQueryEvent = PyQt6.QtGui.QInputMethodQueryEvent
QIntValidator = PyQt6.QtGui.QIntValidator
QKeyEvent = PyQt6.QtGui.QKeyEvent
QKeySequence = PyQt6.QtGui.QKeySequence
QLinearGradient = PyQt6.QtGui.QLinearGradient
QMatrix2x2 = PyQt6.QtGui.QMatrix2x2
QMatrix2x3 = PyQt6.QtGui.QMatrix2x3
QMatrix2x4 = PyQt6.QtGui.QMatrix2x4
QMatrix3x2 = PyQt6.QtGui.QMatrix3x2
QMatrix3x3 = PyQt6.QtGui.QMatrix3x3
QMatrix3x4 = PyQt6.QtGui.QMatrix3x4
QMatrix4x2 = PyQt6.QtGui.QMatrix4x2
QMatrix4x3 = PyQt6.QtGui.QMatrix4x3
QMatrix4x4 = PyQt6.QtGui.QMatrix4x4
QMouseEvent = PyQt6.QtGui.QMouseEvent
QMoveEvent = PyQt6.QtGui.QMoveEvent
QMovie = PyQt6.QtGui.QMovie
QNativeGestureEvent = PyQt6.QtGui.QNativeGestureEvent
QOffscreenSurface = PyQt6.QtGui.QOffscreenSurface
QOpenGLContext = PyQt6.QtGui.QOpenGLContext
QOpenGLContextGroup = PyQt6.QtGui.QOpenGLContextGroup
QPageLayout = PyQt6.QtGui.QPageLayout
QPageRanges = PyQt6.QtGui.QPageRanges
QPageSize = PyQt6.QtGui.QPageSize
QPagedPaintDevice = PyQt6.QtGui.QPagedPaintDevice
QPaintDevice = PyQt6.QtGui.QPaintDevice
QPaintDeviceWindow = PyQt6.QtGui.QPaintDeviceWindow
QPaintEngine = PyQt6.QtGui.QPaintEngine
QPaintEngineState = PyQt6.QtGui.QPaintEngineState
QPaintEvent = PyQt6.QtGui.QPaintEvent
QPainter = PyQt6.QtGui.QPainter
QPainterPath = PyQt6.QtGui.QPainterPath
QPainterPathStroker = PyQt6.QtGui.QPainterPathStroker
QPalette = PyQt6.QtGui.QPalette
QPdfWriter = PyQt6.QtGui.QPdfWriter
QPen = PyQt6.QtGui.QPen
QPicture = PyQt6.QtGui.QPicture
QPixelFormat = PyQt6.QtGui.QPixelFormat
QPixmap = PyQt6.QtGui.QPixmap
QPixmapCache = PyQt6.QtGui.QPixmapCache
QPlatformSurfaceEvent = PyQt6.QtGui.QPlatformSurfaceEvent
QPointerEvent = PyQt6.QtGui.QPointerEvent
QPointingDevice = PyQt6.QtGui.QPointingDevice
QPointingDeviceUniqueId = PyQt6.QtGui.QPointingDeviceUniqueId
QPolygon = PyQt6.QtGui.QPolygon
QPolygonF = PyQt6.QtGui.QPolygonF
QQuaternion = PyQt6.QtGui.QQuaternion
QRadialGradient = PyQt6.QtGui.QRadialGradient
QRasterWindow = PyQt6.QtGui.QRasterWindow
QRawFont = PyQt6.QtGui.QRawFont
QRegion = PyQt6.QtGui.QRegion
QRegularExpressionValidator = PyQt6.QtGui.QRegularExpressionValidator
QResizeEvent = PyQt6.QtGui.QResizeEvent
QRgba64 = PyQt6.QtGui.QRgba64
QScreen = PyQt6.QtGui.QScreen
QScrollEvent = PyQt6.QtGui.QScrollEvent
QScrollPrepareEvent = PyQt6.QtGui.QScrollPrepareEvent
QSessionManager = PyQt6.QtGui.QSessionManager
QShortcut = PyQt6.QtGui.QShortcut
QShortcutEvent = PyQt6.QtGui.QShortcutEvent
QShowEvent = PyQt6.QtGui.QShowEvent
QSinglePointEvent = PyQt6.QtGui.QSinglePointEvent
QStandardItem = PyQt6.QtGui.QStandardItem
QStandardItemModel = PyQt6.QtGui.QStandardItemModel
QStaticText = PyQt6.QtGui.QStaticText
QStatusTipEvent = PyQt6.QtGui.QStatusTipEvent
QStyleHints = PyQt6.QtGui.QStyleHints
QSurface = PyQt6.QtGui.QSurface
QSurfaceFormat = PyQt6.QtGui.QSurfaceFormat
QSyntaxHighlighter = PyQt6.QtGui.QSyntaxHighlighter
QTabletEvent = PyQt6.QtGui.QTabletEvent
QTextBlock = PyQt6.QtGui.QTextBlock
QTextBlockFormat = PyQt6.QtGui.QTextBlockFormat
QTextBlockGroup = PyQt6.QtGui.QTextBlockGroup
QTextBlockUserData = PyQt6.QtGui.QTextBlockUserData
QTextCharFormat = PyQt6.QtGui.QTextCharFormat
QTextCursor = PyQt6.QtGui.QTextCursor
QTextDocument = PyQt6.QtGui.QTextDocument
QTextDocumentFragment = PyQt6.QtGui.QTextDocumentFragment
QTextDocumentWriter = PyQt6.QtGui.QTextDocumentWriter
QTextFormat = PyQt6.QtGui.QTextFormat
QTextFragment = PyQt6.QtGui.QTextFragment
QTextFrame = PyQt6.QtGui.QTextFrame
QTextFrameFormat = PyQt6.QtGui.QTextFrameFormat
QTextImageFormat = PyQt6.QtGui.QTextImageFormat
QTextInlineObject = PyQt6.QtGui.QTextInlineObject
QTextItem = PyQt6.QtGui.QTextItem
QTextLayout = PyQt6.QtGui.QTextLayout
QTextLength = PyQt6.QtGui.QTextLength
QTextLine = PyQt6.QtGui.QTextLine
QTextList = PyQt6.QtGui.QTextList
QTextListFormat = PyQt6.QtGui.QTextListFormat
QTextObject = PyQt6.QtGui.QTextObject
QTextObjectInterface = PyQt6.QtGui.QTextObjectInterface
QTextOption = PyQt6.QtGui.QTextOption
QTextTable = PyQt6.QtGui.QTextTable
QTextTableCell = PyQt6.QtGui.QTextTableCell
QTextTableCellFormat = PyQt6.QtGui.QTextTableCellFormat
QTextTableFormat = PyQt6.QtGui.QTextTableFormat
QTouchEvent = PyQt6.QtGui.QTouchEvent
QTransform = PyQt6.QtGui.QTransform
QUndoCommand = PyQt6.QtGui.QUndoCommand
QUndoGroup = PyQt6.QtGui.QUndoGroup
QUndoStack = PyQt6.QtGui.QUndoStack
QValidator = PyQt6.QtGui.QValidator
QVector2D = PyQt6.QtGui.QVector2D
QVector3D = PyQt6.QtGui.QVector3D
QVector4D = PyQt6.QtGui.QVector4D
QWhatsThisClickedEvent = PyQt6.QtGui.QWhatsThisClickedEvent
QWheelEvent = PyQt6.QtGui.QWheelEvent
QWindow = PyQt6.QtGui.QWindow
QWindowStateChangeEvent = PyQt6.QtGui.QWindowStateChangeEvent
qAlpha = PyQt6.QtGui.qAlpha
qBlue = PyQt6.QtGui.qBlue
qGray = PyQt6.QtGui.qGray
qGreen = PyQt6.QtGui.qGreen
qPixelFormatAlpha = PyQt6.QtGui.qPixelFormatAlpha
qPixelFormatCmyk = PyQt6.QtGui.qPixelFormatCmyk
qPixelFormatGrayscale = PyQt6.QtGui.qPixelFormatGrayscale
qPixelFormatHsl = PyQt6.QtGui.qPixelFormatHsl
qPixelFormatHsv = PyQt6.QtGui.qPixelFormatHsv
qPixelFormatRgba = PyQt6.QtGui.qPixelFormatRgba
qPixelFormatYuv = PyQt6.QtGui.qPixelFormatYuv
qPremultiply = PyQt6.QtGui.qPremultiply
qRed = PyQt6.QtGui.qRed
qRgb = PyQt6.QtGui.qRgb
qRgba = PyQt6.QtGui.qRgba
qRgba64 = PyQt6.QtGui.qRgba64
qUnpremultiply = PyQt6.QtGui.qUnpremultiply
qt_set_sequence_auto_mnemonic = PyQt6.QtGui.qt_set_sequence_auto_mnemonic
import PyQt6.QtWidgets
QAbstractButton = PyQt6.QtWidgets.QAbstractButton
QAbstractGraphicsShapeItem = PyQt6.QtWidgets.QAbstractGraphicsShapeItem
QAbstractItemDelegate = PyQt6.QtWidgets.QAbstractItemDelegate
QAbstractItemView = PyQt6.QtWidgets.QAbstractItemView
QAbstractScrollArea = PyQt6.QtWidgets.QAbstractScrollArea
QAbstractSlider = PyQt6.QtWidgets.QAbstractSlider
QAbstractSpinBox = PyQt6.QtWidgets.QAbstractSpinBox
QApplication = PyQt6.QtWidgets.QApplication
QBoxLayout = PyQt6.QtWidgets.QBoxLayout
QButtonGroup = PyQt6.QtWidgets.QButtonGroup
QCalendarWidget = PyQt6.QtWidgets.QCalendarWidget
QCheckBox = PyQt6.QtWidgets.QCheckBox
QColorDialog = PyQt6.QtWidgets.QColorDialog
QColumnView = PyQt6.QtWidgets.QColumnView
QComboBox = PyQt6.QtWidgets.QComboBox
QCommandLinkButton = PyQt6.QtWidgets.QCommandLinkButton
QCommonStyle = PyQt6.QtWidgets.QCommonStyle
QCompleter = PyQt6.QtWidgets.QCompleter
QDataWidgetMapper = PyQt6.QtWidgets.QDataWidgetMapper
QDateEdit = PyQt6.QtWidgets.QDateEdit
QDateTimeEdit = PyQt6.QtWidgets.QDateTimeEdit
QDial = PyQt6.QtWidgets.QDial
QDialog = PyQt6.QtWidgets.QDialog
QDialogButtonBox = PyQt6.QtWidgets.QDialogButtonBox
QDockWidget = PyQt6.QtWidgets.QDockWidget
QDoubleSpinBox = PyQt6.QtWidgets.QDoubleSpinBox
QErrorMessage = PyQt6.QtWidgets.QErrorMessage
QFileDialog = PyQt6.QtWidgets.QFileDialog
QFileIconProvider = PyQt6.QtWidgets.QFileIconProvider
QFocusFrame = PyQt6.QtWidgets.QFocusFrame
QFontComboBox = PyQt6.QtWidgets.QFontComboBox
QFontDialog = PyQt6.QtWidgets.QFontDialog
QFormLayout = PyQt6.QtWidgets.QFormLayout
QFrame = PyQt6.QtWidgets.QFrame
QGesture = PyQt6.QtWidgets.QGesture
QGestureEvent = PyQt6.QtWidgets.QGestureEvent
QGestureRecognizer = PyQt6.QtWidgets.QGestureRecognizer
QGraphicsAnchor = PyQt6.QtWidgets.QGraphicsAnchor
QGraphicsAnchorLayout = PyQt6.QtWidgets.QGraphicsAnchorLayout
QGraphicsBlurEffect = PyQt6.QtWidgets.QGraphicsBlurEffect
QGraphicsColorizeEffect = PyQt6.QtWidgets.QGraphicsColorizeEffect
QGraphicsDropShadowEffect = PyQt6.QtWidgets.QGraphicsDropShadowEffect
QGraphicsEffect = PyQt6.QtWidgets.QGraphicsEffect
QGraphicsEllipseItem = PyQt6.QtWidgets.QGraphicsEllipseItem
QGraphicsGridLayout = PyQt6.QtWidgets.QGraphicsGridLayout
QGraphicsItem = PyQt6.QtWidgets.QGraphicsItem
QGraphicsItemGroup = PyQt6.QtWidgets.QGraphicsItemGroup
QGraphicsLayout = PyQt6.QtWidgets.QGraphicsLayout
QGraphicsLayoutItem = PyQt6.QtWidgets.QGraphicsLayoutItem
QGraphicsLineItem = PyQt6.QtWidgets.QGraphicsLineItem
QGraphicsLinearLayout = PyQt6.QtWidgets.QGraphicsLinearLayout
QGraphicsObject = PyQt6.QtWidgets.QGraphicsObject
QGraphicsOpacityEffect = PyQt6.QtWidgets.QGraphicsOpacityEffect
QGraphicsPathItem = PyQt6.QtWidgets.QGraphicsPathItem
QGraphicsPixmapItem = PyQt6.QtWidgets.QGraphicsPixmapItem
QGraphicsPolygonItem = PyQt6.QtWidgets.QGraphicsPolygonItem
QGraphicsProxyWidget = PyQt6.QtWidgets.QGraphicsProxyWidget
QGraphicsRectItem = PyQt6.QtWidgets.QGraphicsRectItem
QGraphicsRotation = PyQt6.QtWidgets.QGraphicsRotation
QGraphicsScale = PyQt6.QtWidgets.QGraphicsScale
QGraphicsScene = PyQt6.QtWidgets.QGraphicsScene
QGraphicsSceneContextMenuEvent = PyQt6.QtWidgets.QGraphicsSceneContextMenuEvent
QGraphicsSceneDragDropEvent = PyQt6.QtWidgets.QGraphicsSceneDragDropEvent
QGraphicsSceneEvent = PyQt6.QtWidgets.QGraphicsSceneEvent
QGraphicsSceneHelpEvent = PyQt6.QtWidgets.QGraphicsSceneHelpEvent
QGraphicsSceneHoverEvent = PyQt6.QtWidgets.QGraphicsSceneHoverEvent
QGraphicsSceneMouseEvent = PyQt6.QtWidgets.QGraphicsSceneMouseEvent
QGraphicsSceneMoveEvent = PyQt6.QtWidgets.QGraphicsSceneMoveEvent
QGraphicsSceneResizeEvent = PyQt6.QtWidgets.QGraphicsSceneResizeEvent
QGraphicsSceneWheelEvent = PyQt6.QtWidgets.QGraphicsSceneWheelEvent
QGraphicsSimpleTextItem = PyQt6.QtWidgets.QGraphicsSimpleTextItem
QGraphicsTextItem = PyQt6.QtWidgets.QGraphicsTextItem
QGraphicsTransform = PyQt6.QtWidgets.QGraphicsTransform
QGraphicsView = PyQt6.QtWidgets.QGraphicsView
QGraphicsWidget = PyQt6.QtWidgets.QGraphicsWidget
QGridLayout = PyQt6.QtWidgets.QGridLayout
QGroupBox = PyQt6.QtWidgets.QGroupBox
QHBoxLayout = PyQt6.QtWidgets.QHBoxLayout
QHeaderView = PyQt6.QtWidgets.QHeaderView
QInputDialog = PyQt6.QtWidgets.QInputDialog
QItemDelegate = PyQt6.QtWidgets.QItemDelegate
QItemEditorCreatorBase = PyQt6.QtWidgets.QItemEditorCreatorBase
QItemEditorFactory = PyQt6.QtWidgets.QItemEditorFactory
QKeySequenceEdit = PyQt6.QtWidgets.QKeySequenceEdit
QLCDNumber = PyQt6.QtWidgets.QLCDNumber
QLabel = PyQt6.QtWidgets.QLabel
QLayout = PyQt6.QtWidgets.QLayout
QLayoutItem = PyQt6.QtWidgets.QLayoutItem
QLineEdit = PyQt6.QtWidgets.QLineEdit
QListView = PyQt6.QtWidgets.QListView
QListWidget = PyQt6.QtWidgets.QListWidget
QListWidgetItem = PyQt6.QtWidgets.QListWidgetItem
QMainWindow = PyQt6.QtWidgets.QMainWindow
QMdiArea = PyQt6.QtWidgets.QMdiArea
QMdiSubWindow = PyQt6.QtWidgets.QMdiSubWindow
QMenu = PyQt6.QtWidgets.QMenu
QMenuBar = PyQt6.QtWidgets.QMenuBar
QMessageBox = PyQt6.QtWidgets.QMessageBox
QPanGesture = PyQt6.QtWidgets.QPanGesture
QPinchGesture = PyQt6.QtWidgets.QPinchGesture
QPlainTextDocumentLayout = PyQt6.QtWidgets.QPlainTextDocumentLayout
QPlainTextEdit = PyQt6.QtWidgets.QPlainTextEdit
QProgressBar = PyQt6.QtWidgets.QProgressBar
QProgressDialog = PyQt6.QtWidgets.QProgressDialog
QProxyStyle = PyQt6.QtWidgets.QProxyStyle
QPushButton = PyQt6.QtWidgets.QPushButton
QRadioButton = PyQt6.QtWidgets.QRadioButton
QRubberBand = PyQt6.QtWidgets.QRubberBand
QScrollArea = PyQt6.QtWidgets.QScrollArea
QScrollBar = PyQt6.QtWidgets.QScrollBar
QScroller = PyQt6.QtWidgets.QScroller
QScrollerProperties = PyQt6.QtWidgets.QScrollerProperties
QSizeGrip = PyQt6.QtWidgets.QSizeGrip
QSizePolicy = PyQt6.QtWidgets.QSizePolicy
QSlider = PyQt6.QtWidgets.QSlider
QSpacerItem = PyQt6.QtWidgets.QSpacerItem
QSpinBox = PyQt6.QtWidgets.QSpinBox
QSplashScreen = PyQt6.QtWidgets.QSplashScreen
QSplitter = PyQt6.QtWidgets.QSplitter
QSplitterHandle = PyQt6.QtWidgets.QSplitterHandle
QStackedLayout = PyQt6.QtWidgets.QStackedLayout
QStackedWidget = PyQt6.QtWidgets.QStackedWidget
QStatusBar = PyQt6.QtWidgets.QStatusBar
QStyle = PyQt6.QtWidgets.QStyle
QStyleFactory = PyQt6.QtWidgets.QStyleFactory
QStyleHintReturn = PyQt6.QtWidgets.QStyleHintReturn
QStyleHintReturnMask = PyQt6.QtWidgets.QStyleHintReturnMask
QStyleHintReturnVariant = PyQt6.QtWidgets.QStyleHintReturnVariant
QStyleOption = PyQt6.QtWidgets.QStyleOption
QStyleOptionButton = PyQt6.QtWidgets.QStyleOptionButton
QStyleOptionComboBox = PyQt6.QtWidgets.QStyleOptionComboBox
QStyleOptionComplex = PyQt6.QtWidgets.QStyleOptionComplex
QStyleOptionDockWidget = PyQt6.QtWidgets.QStyleOptionDockWidget
QStyleOptionFocusRect = PyQt6.QtWidgets.QStyleOptionFocusRect
QStyleOptionFrame = PyQt6.QtWidgets.QStyleOptionFrame
QStyleOptionGraphicsItem = PyQt6.QtWidgets.QStyleOptionGraphicsItem
QStyleOptionGroupBox = PyQt6.QtWidgets.QStyleOptionGroupBox
QStyleOptionHeader = PyQt6.QtWidgets.QStyleOptionHeader
QStyleOptionHeaderV2 = PyQt6.QtWidgets.QStyleOptionHeaderV2
QStyleOptionMenuItem = PyQt6.QtWidgets.QStyleOptionMenuItem
QStyleOptionProgressBar = PyQt6.QtWidgets.QStyleOptionProgressBar
QStyleOptionRubberBand = PyQt6.QtWidgets.QStyleOptionRubberBand
QStyleOptionSizeGrip = PyQt6.QtWidgets.QStyleOptionSizeGrip
QStyleOptionSlider = PyQt6.QtWidgets.QStyleOptionSlider
QStyleOptionSpinBox = PyQt6.QtWidgets.QStyleOptionSpinBox
QStyleOptionTab = PyQt6.QtWidgets.QStyleOptionTab
QStyleOptionTabBarBase = PyQt6.QtWidgets.QStyleOptionTabBarBase
QStyleOptionTabWidgetFrame = PyQt6.QtWidgets.QStyleOptionTabWidgetFrame
QStyleOptionTitleBar = PyQt6.QtWidgets.QStyleOptionTitleBar
QStyleOptionToolBar = PyQt6.QtWidgets.QStyleOptionToolBar
QStyleOptionToolBox = PyQt6.QtWidgets.QStyleOptionToolBox
QStyleOptionToolButton = PyQt6.QtWidgets.QStyleOptionToolButton
QStyleOptionViewItem = PyQt6.QtWidgets.QStyleOptionViewItem
QStylePainter = PyQt6.QtWidgets.QStylePainter
QStyledItemDelegate = PyQt6.QtWidgets.QStyledItemDelegate
QSwipeGesture = PyQt6.QtWidgets.QSwipeGesture
QSystemTrayIcon = PyQt6.QtWidgets.QSystemTrayIcon
QTabBar = PyQt6.QtWidgets.QTabBar
QTabWidget = PyQt6.QtWidgets.QTabWidget
QTableView = PyQt6.QtWidgets.QTableView
QTableWidget = PyQt6.QtWidgets.QTableWidget
QTableWidgetItem = PyQt6.QtWidgets.QTableWidgetItem
QTableWidgetSelectionRange = PyQt6.QtWidgets.QTableWidgetSelectionRange
QTapAndHoldGesture = PyQt6.QtWidgets.QTapAndHoldGesture
QTapGesture = PyQt6.QtWidgets.QTapGesture
QTextBrowser = PyQt6.QtWidgets.QTextBrowser
QTextEdit = PyQt6.QtWidgets.QTextEdit
QTimeEdit = PyQt6.QtWidgets.QTimeEdit
QToolBar = PyQt6.QtWidgets.QToolBar
QToolBox = PyQt6.QtWidgets.QToolBox
QToolButton = PyQt6.QtWidgets.QToolButton
QToolTip = PyQt6.QtWidgets.QToolTip
QTreeView = PyQt6.QtWidgets.QTreeView
QTreeWidget = PyQt6.QtWidgets.QTreeWidget
QTreeWidgetItem = PyQt6.QtWidgets.QTreeWidgetItem
QTreeWidgetItemIterator = PyQt6.QtWidgets.QTreeWidgetItemIterator
QUndoView = PyQt6.QtWidgets.QUndoView
QVBoxLayout = PyQt6.QtWidgets.QVBoxLayout
QWIDGETSIZE_MAX = PyQt6.QtWidgets.QWIDGETSIZE_MAX
QWhatsThis = PyQt6.QtWidgets.QWhatsThis
QWidget = PyQt6.QtWidgets.QWidget
QWidgetAction = PyQt6.QtWidgets.QWidgetAction
QWidgetItem = PyQt6.QtWidgets.QWidgetItem
QWizard = PyQt6.QtWidgets.QWizard
QWizardPage = PyQt6.QtWidgets.QWizardPage
qDrawBorderPixmap = PyQt6.QtWidgets.qDrawBorderPixmap
qDrawPlainRect = PyQt6.QtWidgets.qDrawPlainRect
qDrawPlainRoundedRect = PyQt6.QtWidgets.qDrawPlainRoundedRect
qDrawShadeLine = PyQt6.QtWidgets.qDrawShadeLine
qDrawShadePanel = PyQt6.QtWidgets.qDrawShadePanel
qDrawShadeRect = PyQt6.QtWidgets.qDrawShadeRect
qDrawWinButton = PyQt6.QtWidgets.qDrawWinButton
qDrawWinPanel = PyQt6.QtWidgets.qDrawWinPanel
import PyQt6.QtNetwork
QAbstractNetworkCache = PyQt6.QtNetwork.QAbstractNetworkCache
QAbstractSocket = PyQt6.QtNetwork.QAbstractSocket
QAuthenticator = PyQt6.QtNetwork.QAuthenticator
QDnsDomainNameRecord = PyQt6.QtNetwork.QDnsDomainNameRecord
QDnsHostAddressRecord = PyQt6.QtNetwork.QDnsHostAddressRecord
QDnsLookup = PyQt6.QtNetwork.QDnsLookup
QDnsMailExchangeRecord = PyQt6.QtNetwork.QDnsMailExchangeRecord
QDnsServiceRecord = PyQt6.QtNetwork.QDnsServiceRecord
QDnsTextRecord = PyQt6.QtNetwork.QDnsTextRecord
QHostAddress = PyQt6.QtNetwork.QHostAddress
QHostInfo = PyQt6.QtNetwork.QHostInfo
QHstsPolicy = PyQt6.QtNetwork.QHstsPolicy
QHttp1Configuration = PyQt6.QtNetwork.QHttp1Configuration
QHttp2Configuration = PyQt6.QtNetwork.QHttp2Configuration
QHttpHeaders = PyQt6.QtNetwork.QHttpHeaders
QHttpMultiPart = PyQt6.QtNetwork.QHttpMultiPart
QHttpPart = PyQt6.QtNetwork.QHttpPart
QLocalServer = PyQt6.QtNetwork.QLocalServer
QLocalSocket = PyQt6.QtNetwork.QLocalSocket
QNetworkAccessManager = PyQt6.QtNetwork.QNetworkAccessManager
QNetworkAddressEntry = PyQt6.QtNetwork.QNetworkAddressEntry
QNetworkCacheMetaData = PyQt6.QtNetwork.QNetworkCacheMetaData
QNetworkCookie = PyQt6.QtNetwork.QNetworkCookie
QNetworkCookieJar = PyQt6.QtNetwork.QNetworkCookieJar
QNetworkDatagram = PyQt6.QtNetwork.QNetworkDatagram
QNetworkDiskCache = PyQt6.QtNetwork.QNetworkDiskCache
QNetworkInformation = PyQt6.QtNetwork.QNetworkInformation
QNetworkInterface = PyQt6.QtNetwork.QNetworkInterface
QNetworkProxy = PyQt6.QtNetwork.QNetworkProxy
QNetworkProxyFactory = PyQt6.QtNetwork.QNetworkProxyFactory
QNetworkProxyQuery = PyQt6.QtNetwork.QNetworkProxyQuery
QNetworkReply = PyQt6.QtNetwork.QNetworkReply
QNetworkRequest = PyQt6.QtNetwork.QNetworkRequest
QOcspCertificateStatus = PyQt6.QtNetwork.QOcspCertificateStatus
QOcspResponse = PyQt6.QtNetwork.QOcspResponse
QOcspRevocationReason = PyQt6.QtNetwork.QOcspRevocationReason
QPasswordDigestor = PyQt6.QtNetwork.QPasswordDigestor
QSsl = PyQt6.QtNetwork.QSsl
QSslCertificate = PyQt6.QtNetwork.QSslCertificate
QSslCertificateExtension = PyQt6.QtNetwork.QSslCertificateExtension
QSslCipher = PyQt6.QtNetwork.QSslCipher
QSslConfiguration = PyQt6.QtNetwork.QSslConfiguration
QSslDiffieHellmanParameters = PyQt6.QtNetwork.QSslDiffieHellmanParameters
QSslEllipticCurve = PyQt6.QtNetwork.QSslEllipticCurve
QSslError = PyQt6.QtNetwork.QSslError
QSslKey = PyQt6.QtNetwork.QSslKey
QSslPreSharedKeyAuthenticator = PyQt6.QtNetwork.QSslPreSharedKeyAuthenticator
QSslServer = PyQt6.QtNetwork.QSslServer
QSslSocket = PyQt6.QtNetwork.QSslSocket
QTcpServer = PyQt6.QtNetwork.QTcpServer
QTcpSocket = PyQt6.QtNetwork.QTcpSocket
QUdpSocket = PyQt6.QtNetwork.QUdpSocket
import PyQt6.QtSvg
QSvgGenerator = PyQt6.QtSvg.QSvgGenerator
QSvgRenderer = PyQt6.QtSvg.QSvgRenderer
import PyQt6.QtPrintSupport
QAbstractPrintDialog = PyQt6.QtPrintSupport.QAbstractPrintDialog
QPageSetupDialog = PyQt6.QtPrintSupport.QPageSetupDialog
QPrintDialog = PyQt6.QtPrintSupport.QPrintDialog
QPrintEngine = PyQt6.QtPrintSupport.QPrintEngine
QPrintPreviewDialog = PyQt6.QtPrintSupport.QPrintPreviewDialog
QPrintPreviewWidget = PyQt6.QtPrintSupport.QPrintPreviewWidget
QPrinter = PyQt6.QtPrintSupport.QPrinter
QPrinterInfo = PyQt6.QtPrintSupport.QPrinterInfo
import PyQt6.QtOpenGL
QAbstractOpenGLFunctions = PyQt6.QtOpenGL.QAbstractOpenGLFunctions
QOpenGLBuffer = PyQt6.QtOpenGL.QOpenGLBuffer
QOpenGLDebugLogger = PyQt6.QtOpenGL.QOpenGLDebugLogger
QOpenGLDebugMessage = PyQt6.QtOpenGL.QOpenGLDebugMessage
QOpenGLFramebufferObject = PyQt6.QtOpenGL.QOpenGLFramebufferObject
QOpenGLFramebufferObjectFormat = PyQt6.QtOpenGL.QOpenGLFramebufferObjectFormat
QOpenGLFunctions_2_0 = PyQt6.QtOpenGL.QOpenGLFunctions_2_0
QOpenGLFunctions_2_1 = PyQt6.QtOpenGL.QOpenGLFunctions_2_1
QOpenGLFunctions_4_1_Core = PyQt6.QtOpenGL.QOpenGLFunctions_4_1_Core
QOpenGLPaintDevice = PyQt6.QtOpenGL.QOpenGLPaintDevice
QOpenGLPixelTransferOptions = PyQt6.QtOpenGL.QOpenGLPixelTransferOptions
QOpenGLShader = PyQt6.QtOpenGL.QOpenGLShader
QOpenGLShaderProgram = PyQt6.QtOpenGL.QOpenGLShaderProgram
QOpenGLTexture = PyQt6.QtOpenGL.QOpenGLTexture
QOpenGLTextureBlitter = PyQt6.QtOpenGL.QOpenGLTextureBlitter
QOpenGLTimeMonitor = PyQt6.QtOpenGL.QOpenGLTimeMonitor
QOpenGLTimerQuery = PyQt6.QtOpenGL.QOpenGLTimerQuery
QOpenGLVersionFunctionsFactory = PyQt6.QtOpenGL.QOpenGLVersionFunctionsFactory
QOpenGLVersionProfile = PyQt6.QtOpenGL.QOpenGLVersionProfile
QOpenGLVertexArrayObject = PyQt6.QtOpenGL.QOpenGLVertexArrayObject
QOpenGLWindow = PyQt6.QtOpenGL.QOpenGLWindow
import PyQt6.QtOpenGLWidgets
QOpenGLWidget = PyQt6.QtOpenGLWidgets.QOpenGLWidget
import PyQt6.QtQuick
QNativeInterface = PyQt6.QtQuick.QNativeInterface
QQuickAsyncImageProvider = PyQt6.QtQuick.QQuickAsyncImageProvider
QQuickCloseEvent = PyQt6.QtQuick.QQuickCloseEvent
QQuickFramebufferObject = PyQt6.QtQuick.QQuickFramebufferObject
QQuickGraphicsConfiguration = PyQt6.QtQuick.QQuickGraphicsConfiguration
QQuickGraphicsDevice = PyQt6.QtQuick.QQuickGraphicsDevice
QQuickImageProvider = PyQt6.QtQuick.QQuickImageProvider
QQuickImageResponse = PyQt6.QtQuick.QQuickImageResponse
QQuickItem = PyQt6.QtQuick.QQuickItem
QQuickItemGrabResult = PyQt6.QtQuick.QQuickItemGrabResult
QQuickPaintedItem = PyQt6.QtQuick.QQuickPaintedItem
QQuickRenderControl = PyQt6.QtQuick.QQuickRenderControl
QQuickRenderTarget = PyQt6.QtQuick.QQuickRenderTarget
QQuickTextDocument = PyQt6.QtQuick.QQuickTextDocument
QQuickTextureFactory = PyQt6.QtQuick.QQuickTextureFactory
QQuickView = PyQt6.QtQuick.QQuickView
QQuickWindow = PyQt6.QtQuick.QQuickWindow
QSGBasicGeometryNode = PyQt6.QtQuick.QSGBasicGeometryNode
QSGClipNode = PyQt6.QtQuick.QSGClipNode
QSGDynamicTexture = PyQt6.QtQuick.QSGDynamicTexture
QSGFlatColorMaterial = PyQt6.QtQuick.QSGFlatColorMaterial
QSGGeometry = PyQt6.QtQuick.QSGGeometry
QSGGeometryNode = PyQt6.QtQuick.QSGGeometryNode
QSGImageNode = PyQt6.QtQuick.QSGImageNode
QSGMaterial = PyQt6.QtQuick.QSGMaterial
QSGMaterialShader = PyQt6.QtQuick.QSGMaterialShader
QSGMaterialType = PyQt6.QtQuick.QSGMaterialType
QSGNode = PyQt6.QtQuick.QSGNode
QSGOpacityNode = PyQt6.QtQuick.QSGOpacityNode
QSGOpaqueTextureMaterial = PyQt6.QtQuick.QSGOpaqueTextureMaterial
QSGRectangleNode = PyQt6.QtQuick.QSGRectangleNode
QSGRenderNode = PyQt6.QtQuick.QSGRenderNode
QSGRendererInterface = PyQt6.QtQuick.QSGRendererInterface
QSGSimpleRectNode = PyQt6.QtQuick.QSGSimpleRectNode
QSGSimpleTextureNode = PyQt6.QtQuick.QSGSimpleTextureNode
QSGTextNode = PyQt6.QtQuick.QSGTextNode
QSGTexture = PyQt6.QtQuick.QSGTexture
QSGTextureMaterial = PyQt6.QtQuick.QSGTextureMaterial
QSGTextureProvider = PyQt6.QtQuick.QSGTextureProvider
QSGTransformNode = PyQt6.QtQuick.QSGTransformNode
QSGVertexColorMaterial = PyQt6.QtQuick.QSGVertexColorMaterial
import PyQt6.QtMultimedia
QAudio = PyQt6.QtMultimedia.QAudio
QAudioBuffer = PyQt6.QtMultimedia.QAudioBuffer
QAudioDecoder = PyQt6.QtMultimedia.QAudioDecoder
QAudioDevice = PyQt6.QtMultimedia.QAudioDevice
QAudioFormat = PyQt6.QtMultimedia.QAudioFormat
QAudioInput = PyQt6.QtMultimedia.QAudioInput
QAudioOutput = PyQt6.QtMultimedia.QAudioOutput
QAudioSink = PyQt6.QtMultimedia.QAudioSink
QAudioSource = PyQt6.QtMultimedia.QAudioSource
QCamera = PyQt6.QtMultimedia.QCamera
QCameraDevice = PyQt6.QtMultimedia.QCameraDevice
QCameraFormat = PyQt6.QtMultimedia.QCameraFormat
QCapturableWindow = PyQt6.QtMultimedia.QCapturableWindow
QImageCapture = PyQt6.QtMultimedia.QImageCapture
QMediaCaptureSession = PyQt6.QtMultimedia.QMediaCaptureSession
QMediaDevices = PyQt6.QtMultimedia.QMediaDevices
QMediaFormat = PyQt6.QtMultimedia.QMediaFormat
QMediaMetaData = PyQt6.QtMultimedia.QMediaMetaData
QMediaPlayer = PyQt6.QtMultimedia.QMediaPlayer
QMediaRecorder = PyQt6.QtMultimedia.QMediaRecorder
QMediaTimeRange = PyQt6.QtMultimedia.QMediaTimeRange
QScreenCapture = PyQt6.QtMultimedia.QScreenCapture
QSoundEffect = PyQt6.QtMultimedia.QSoundEffect
QVideoFrame = PyQt6.QtMultimedia.QVideoFrame
QVideoFrameFormat = PyQt6.QtMultimedia.QVideoFrameFormat
QVideoSink = PyQt6.QtMultimedia.QVideoSink
QWindowCapture = PyQt6.QtMultimedia.QWindowCapture
QtVideo = PyQt6.QtMultimedia.QtVideo
import PyQt6.QtMultimediaWidgets
QGraphicsVideoItem = PyQt6.QtMultimediaWidgets.QGraphicsVideoItem
QVideoWidget = PyQt6.QtMultimediaWidgets.QVideoWidget
import PyQt6.QtTextToSpeech
QTextToSpeech = PyQt6.QtTextToSpeech.QTextToSpeech
QVoice = PyQt6.QtTextToSpeech.QVoice | 35,459 | Python | .py | 769 | 45.111834 | 79 | 0.888559 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,834 | __init__.py | kovidgoyal_calibre/src/qt/__init__.py | # autogenerated by __main__.py do not edit
top_level_module_names=('QtCore', 'QtGui', 'QtWidgets', 'QtNetwork', 'QtSvg', 'QtPrintSupport', 'QtOpenGL', 'QtOpenGLWidgets', 'QtQuick', 'QtMultimedia', 'QtMultimediaWidgets', 'QtTextToSpeech', 'QtWebEngineCore', 'QtWebEngineWidgets', 'QtDBus')
def __getattr__(name):
if name in top_level_module_names:
import importlib
return importlib.import_module("PyQt6." + name)
raise AttributeError(name)
| 466 | Python | .py | 7 | 61.714286 | 245 | 0.714912 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,835 | webengine.pyi | kovidgoyal_calibre/src/qt/webengine.pyi | # autogenerated by __main__.py do not edit
import PyQt6.QtWebEngineCore
PYQT_WEBENGINE_VERSION = PyQt6.QtWebEngineCore.PYQT_WEBENGINE_VERSION
PYQT_WEBENGINE_VERSION_STR = PyQt6.QtWebEngineCore.PYQT_WEBENGINE_VERSION_STR
QWebEngineCertificateError = PyQt6.QtWebEngineCore.QWebEngineCertificateError
QWebEngineClientCertificateSelection = PyQt6.QtWebEngineCore.QWebEngineClientCertificateSelection
QWebEngineClientCertificateStore = PyQt6.QtWebEngineCore.QWebEngineClientCertificateStore
QWebEngineContextMenuRequest = PyQt6.QtWebEngineCore.QWebEngineContextMenuRequest
QWebEngineCookieStore = PyQt6.QtWebEngineCore.QWebEngineCookieStore
QWebEngineDesktopMediaRequest = PyQt6.QtWebEngineCore.QWebEngineDesktopMediaRequest
QWebEngineDownloadRequest = PyQt6.QtWebEngineCore.QWebEngineDownloadRequest
QWebEngineFileSystemAccessRequest = PyQt6.QtWebEngineCore.QWebEngineFileSystemAccessRequest
QWebEngineFindTextResult = PyQt6.QtWebEngineCore.QWebEngineFindTextResult
QWebEngineFullScreenRequest = PyQt6.QtWebEngineCore.QWebEngineFullScreenRequest
QWebEngineGlobalSettings = PyQt6.QtWebEngineCore.QWebEngineGlobalSettings
QWebEngineHistory = PyQt6.QtWebEngineCore.QWebEngineHistory
QWebEngineHistoryItem = PyQt6.QtWebEngineCore.QWebEngineHistoryItem
QWebEngineHistoryModel = PyQt6.QtWebEngineCore.QWebEngineHistoryModel
QWebEngineHttpRequest = PyQt6.QtWebEngineCore.QWebEngineHttpRequest
QWebEngineLoadingInfo = PyQt6.QtWebEngineCore.QWebEngineLoadingInfo
QWebEngineNavigationRequest = PyQt6.QtWebEngineCore.QWebEngineNavigationRequest
QWebEngineNewWindowRequest = PyQt6.QtWebEngineCore.QWebEngineNewWindowRequest
QWebEngineNotification = PyQt6.QtWebEngineCore.QWebEngineNotification
QWebEnginePage = PyQt6.QtWebEngineCore.QWebEnginePage
QWebEngineProfile = PyQt6.QtWebEngineCore.QWebEngineProfile
QWebEngineQuotaRequest = PyQt6.QtWebEngineCore.QWebEngineQuotaRequest
QWebEngineRegisterProtocolHandlerRequest = PyQt6.QtWebEngineCore.QWebEngineRegisterProtocolHandlerRequest
QWebEngineScript = PyQt6.QtWebEngineCore.QWebEngineScript
QWebEngineScriptCollection = PyQt6.QtWebEngineCore.QWebEngineScriptCollection
QWebEngineSettings = PyQt6.QtWebEngineCore.QWebEngineSettings
QWebEngineUrlRequestInfo = PyQt6.QtWebEngineCore.QWebEngineUrlRequestInfo
QWebEngineUrlRequestInterceptor = PyQt6.QtWebEngineCore.QWebEngineUrlRequestInterceptor
QWebEngineUrlRequestJob = PyQt6.QtWebEngineCore.QWebEngineUrlRequestJob
QWebEngineUrlScheme = PyQt6.QtWebEngineCore.QWebEngineUrlScheme
QWebEngineUrlSchemeHandler = PyQt6.QtWebEngineCore.QWebEngineUrlSchemeHandler
QWebEngineWebAuthPinRequest = PyQt6.QtWebEngineCore.QWebEngineWebAuthPinRequest
QWebEngineWebAuthUxRequest = PyQt6.QtWebEngineCore.QWebEngineWebAuthUxRequest
qWebEngineChromiumSecurityPatchVersion = PyQt6.QtWebEngineCore.qWebEngineChromiumSecurityPatchVersion
qWebEngineChromiumVersion = PyQt6.QtWebEngineCore.qWebEngineChromiumVersion
qWebEngineGetDomainAndRegistry = PyQt6.QtWebEngineCore.qWebEngineGetDomainAndRegistry
qWebEngineProcessName = PyQt6.QtWebEngineCore.qWebEngineProcessName
qWebEngineVersion = PyQt6.QtWebEngineCore.qWebEngineVersion
import PyQt6.QtWebEngineWidgets
QWebEngineView = PyQt6.QtWebEngineWidgets.QWebEngineView | 3,180 | Python | .py | 44 | 71.295455 | 105 | 0.926363 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,836 | core.py | kovidgoyal_calibre/src/qt/core.py | # autogenerated by __main__.py do not edit
from .core_name_map import module_names, name_map
from .loader import dynamic_load
already_imported = {}
qt_modules = {}
def __getattr__(name):
return dynamic_load(name, name_map, already_imported, qt_modules, module_names)
| 273 | Python | .py | 7 | 37.142857 | 83 | 0.746212 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,837 | webengine.py | kovidgoyal_calibre/src/qt/webengine.py | # autogenerated by __main__.py do not edit
from .webengine_name_map import module_names, name_map
from .loader import dynamic_load
already_imported = {}
qt_modules = {}
def __getattr__(name):
return dynamic_load(name, name_map, already_imported, qt_modules, module_names)
| 278 | Python | .py | 7 | 37.857143 | 83 | 0.750929 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,838 | token_data.py | kovidgoyal_calibre/src/tinycss/token_data.py | # coding: utf8
"""
tinycss.token_data
------------------
Shared data for both implementations (Cython and Python) of the tokenizer.
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
import functools
import operator
import re
import string
import sys
# * Raw strings with the r'' notation are used so that \ do not need
# to be escaped.
# * Names and regexps are separated by a tabulation.
# * Macros are re-ordered so that only previous definitions are needed.
# * {} are used for macro substitution with ``string.Formatter``,
# so other uses of { or } have been doubled.
# * The syntax is otherwise compatible with re.compile.
# * Some parentheses were added to add capturing groups.
# (in unicode, DIMENSION and URI)
# *** Willful violation: ***
# Numbers can take a + or - sign, but the sign is a separate DELIM token.
# Since comments are allowed anywhere between tokens, this makes
# the following this is valid. It means 10 negative pixels:
# margin-top: -/**/10px
# This makes parsing numbers a pain, so instead we’ll do the same as Firefox
# and make the sign part of the 'num' macro. The above CSS will be invalid.
# See discussion:
# http://lists.w3.org/Archives/Public/www-style/2011Oct/0028.html
MACROS = r'''
nl \n|\r\n|\r|\f
w [ \t\r\n\f]*
nonascii [^\0-\237]
unicode \\([0-9a-f]{{1,6}})(\r\n|[ \n\r\t\f])?
simple_escape [^\n\r\f0-9a-f]
escape {unicode}|\\{simple_escape}
nmstart [_a-z]|{nonascii}|{escape}
nmchar [_a-z0-9-]|{nonascii}|{escape}
name {nmchar}+
ident [-]?{nmstart}{nmchar}*
num [-+]?(?:[0-9]*\.[0-9]+|[0-9]+)
string1 \"([^\n\r\f\\"]|\\{nl}|{escape})*\"
string2 \'([^\n\r\f\\']|\\{nl}|{escape})*\'
string {string1}|{string2}
badstring1 \"([^\n\r\f\\"]|\\{nl}|{escape})*\\?
badstring2 \'([^\n\r\f\\']|\\{nl}|{escape})*\\?
badstring {badstring1}|{badstring2}
badcomment1 \/\*[^*]*\*+([^/*][^*]*\*+)*
badcomment2 \/\*[^*]*(\*+[^/*][^*]*)*
badcomment {badcomment1}|{badcomment2}
baduri1 url\({w}([!#$%&*-~]|{nonascii}|{escape})*{w}
baduri2 url\({w}{string}{w}
baduri3 url\({w}{badstring}
baduri {baduri1}|{baduri2}|{baduri3}
'''.replace(r'\0', '\0').replace(r'\237', '\237')
# Removed these tokens. Instead, they’re tokenized as two DELIM each.
# INCLUDES ~=
# DASHMATCH |=
# They are only used in selectors but selectors3 also have ^=, *= and $=.
# We don’t actually parse selectors anyway
# Re-ordered so that the longest match is always the first.
# For example, "url('foo')" matches URI, BAD_URI, FUNCTION and IDENT,
# but URI would always be a longer match than the others.
TOKENS = r'''
S [ \t\r\n\f]+
URI url\({w}({string}|([!#$%&*-\[\]-~]|{nonascii}|{escape})*){w}\)
BAD_URI {baduri}
FUNCTION {ident}\(
UNICODE-RANGE u\+[0-9a-f?]{{1,6}}(-[0-9a-f]{{1,6}})?
IDENT {ident}
ATKEYWORD @{ident}
HASH #{name}
DIMENSION ({num})({ident})
PERCENTAGE {num}%
NUMBER {num}
STRING {string}
BAD_STRING {badstring}
COMMENT \/\*[^*]*\*+([^/*][^*]*\*+)*\/
BAD_COMMENT {badcomment}
: :
; ;
{ \{{
} \}}
( \(
) \)
[ \[
] \]
CDO <!--
CDC -->
'''
# Strings with {macro} expanded
COMPILED_MACROS = {}
COMPILED_TOKEN_REGEXPS = [] # [(name, regexp.match)] ordered
COMPILED_TOKEN_INDEXES = {} # {name: i} helper for the C speedups
# Indexed by codepoint value of the first character of a token.
# Codepoints >= 160 (aka nonascii) all use the index 160.
# values are (i, name, regexp.match)
TOKEN_DISPATCH = []
# Python 2/3 compatibility: Python 3 has no unichr/unicode builtins, so
# alias them to their Python 3 equivalents when they are missing.
try:
    unichr
except NameError:
    # Python 3
    unichr = chr
    unicode = str
def _init():
    """Import-time initialization.

    Expands the ``{macro}`` references in :data:`MACROS` and :data:`TOKENS`,
    compiles the token regexps, and builds the per-codepoint dispatch table.
    Fills the module-level ``COMPILED_MACROS``, ``COMPILED_TOKEN_REGEXPS``,
    ``COMPILED_TOKEN_INDEXES`` and ``TOKEN_DISPATCH`` containers in place.
    """
    # MACROS lines are ordered so each macro only uses previously-defined
    # ones; expand incrementally, wrapping each in a non-capturing group.
    COMPILED_MACROS.clear()
    for line in MACROS.splitlines():
        if line.strip():
            name, value = line.split('\t')
            COMPILED_MACROS[name.strip()] = '(?:%s)' \
                % value.format(**COMPILED_MACROS)
    # Compile each TOKENS line into (name, regexp.match), keeping the
    # order of TOKENS (longest-match-first, per the comment above TOKENS).
    COMPILED_TOKEN_REGEXPS[:] = (
        (
            name.strip(),
            re.compile(
                value.format(**COMPILED_MACROS),
                # Case-insensitive when matching eg. uRL(foo)
                # but preserve the case in extracted groups
                re.I
            ).match
        )
        for line in TOKENS.splitlines()
        if line.strip()
        for name, value in [line.split('\t')]
    )
    # Map token name -> position in COMPILED_TOKEN_REGEXPS (used by the
    # C speedups and by the dispatch table below).
    COMPILED_TOKEN_INDEXES.clear()
    for i, (name, regexp) in enumerate(COMPILED_TOKEN_REGEXPS):
        COMPILED_TOKEN_INDEXES[name] = i
    # dispatch[codepoint] lists the token types that can start with that
    # codepoint; codepoints >= 160 (aka nonascii) all share index 160.
    dispatch = [[] for i in range(161)]
    for chars, names in [
        (' \t\r\n\f', ['S']),
        ('uU', ['URI', 'BAD_URI', 'UNICODE-RANGE']),
        # \ is an escape outside of another token
        (string.ascii_letters + '\\_-' + unichr(160), ['FUNCTION', 'IDENT']),
        (string.digits + '.+-', ['DIMENSION', 'PERCENTAGE', 'NUMBER']),
        ('@', ['ATKEYWORD']),
        ('#', ['HASH']),
        ('\'"', ['STRING', 'BAD_STRING']),
        ('/', ['COMMENT', 'BAD_COMMENT']),
        ('<', ['CDO']),
        ('-', ['CDC']),
    ]:
        for char in chars:
            dispatch[ord(char)].extend(names)
    # Single-character tokens dispatch only to themselves.
    for char in ':;{}()[]':
        dispatch[ord(char)] = [char]
    # Final table entries are (index, name, regexp.match) triples.
    TOKEN_DISPATCH[:] = (
        [
            (index,) + COMPILED_TOKEN_REGEXPS[index]
            for name in names
            for index in [COMPILED_TOKEN_INDEXES[name]]
        ]
        for names in dispatch
    )
_init()
def _unicode_replace(match, int=int, unichr=unichr, maxunicode=sys.maxunicode):
    """``re.sub`` callback: turn a ``\\XXXXXX`` escape into its character.

    Codepoints beyond what this Python build can represent become
    U+FFFD REPLACEMENT CHARACTER.  (The builtins are bound as default
    arguments to speed up lookups inside ``re.sub``.)
    """
    codepoint = int(match.group(1), 16)
    if codepoint > maxunicode:
        return '\N{REPLACEMENT CHARACTER}'  # U+FFFD
    return unichr(codepoint)
# Pre-bound substitution helpers that undo the three kinds of CSS escaping:
# \XXXXXX hex escapes -> the named codepoint (see _unicode_replace),
UNICODE_UNESCAPE = functools.partial(
    re.compile(COMPILED_MACROS['unicode'], re.I).sub,
    _unicode_replace)
# backslash-newline (allowed inside strings) -> removed entirely,
NEWLINE_UNESCAPE = functools.partial(
    re.compile(r'()\\' + COMPILED_MACROS['nl']).sub,
    '')
# and \c simple escapes -> the literal character c.
SIMPLE_UNESCAPE = functools.partial(
    re.compile(r'\\(%s)' % COMPILED_MACROS['simple_escape'], re.I).sub,
    # Same as r'\1', but faster on CPython
    operator.methodcaller('group', 1))
def FIND_NEWLINES(x):
    """Return a list of all newline matches in *x* (used to compute
    line/column positions).  ``re`` caches compiled patterns, so calling
    the module-level ``finditer`` is equivalent to compiling explicitly."""
    return list(re.finditer(COMPILED_MACROS['nl'], x))
class Token:
    r"""A single atomic token produced by the tokenizer.

    .. attribute:: is_container

        Always ``False``.  Tells :class:`Token` apart from
        :class:`ContainerToken`.

    .. attribute:: type

        The token type as a string, eg. ``S`` (white space), ``IDENT``,
        ``HASH``, ``ATKEYWORD``, ``URI``, ``UNICODE-RANGE``, ``INTEGER``,
        ``NUMBER``, ``DIMENSION``, ``PERCENTAGE``, ``STRING``, ``:``, ``;``,
        or ``DELIM`` (a single character not matched by any other token).
        See the regular expressions in :mod:`.token_data` for the precise
        definitions.  Other token types exist in the early tokenization
        steps, but these are ignored, are syntax errors, or are later
        transformed into :class:`ContainerToken` or :class:`FunctionToken`.

    .. attribute:: value

        The parsed value: numeric tokens as int or float; STRING tokens
        unescaped and unquoted; URI tokens unescaped, without quotes or
        the ``url(`` and ``)`` markers; IDENT, ATKEYWORD and HASH tokens
        unescaped with their ``@``/``#`` markers left as-is; anything
        else the same as :attr:`as_css`.  *Unescaped* refers to the
        backslash-based escaping methods of CSS syntax.

    .. attribute:: unit

        For DIMENSION tokens, the normalized (unescaped, lower-case)
        unit name, eg. ``'px'``; for PERCENTAGE tokens the string
        ``'%'``; ``None`` for everything else.

    .. attribute:: line

        Line number in the CSS source of the start of this token.

    .. attribute:: column

        Column number (inside a source line) of the start of this token.
    """
    is_container = False
    __slots__ = 'type', '_as_css', 'value', 'unit', 'line', 'column'

    def __init__(self, type_, css_value, value, unit, line, column):
        self.type = type_
        self._as_css = css_value
        self.value = value
        self.unit = unit
        self.line = line
        self.column = column

    def as_css(self):
        """Return the CSS representation of the token, as parsed in the
        source, as an Unicode string."""
        return self._as_css

    def __repr__(self):
        unit = self.unit or ''
        return ('<Token {0.type} at {0.line}:{0.column} {0.value!r}{1}>'
                .format(self, unit))
class ContainerToken:
    """A token that contains other (nested) tokens.

    .. attribute:: is_container

        Always ``True``.  Tells :class:`ContainerToken` apart from
        :class:`Token`.

    .. attribute:: type

        The token type as a string: one of ``{``, ``(``, ``[`` or
        ``FUNCTION``.  For ``FUNCTION`` the object is actually a
        :class:`FunctionToken`.

    .. attribute:: unit

        Always ``None``; present so :class:`ContainerToken` behaves
        more like :class:`Token`.

    .. attribute:: content

        A list of :class:`Token` or nested :class:`ContainerToken`,
        not including the opening or closing token.

    .. attribute:: line

        Line number in the CSS source of the start of this token.

    .. attribute:: column

        Column number (inside a source line) of the start of this token.
    """
    is_container = True
    unit = None
    __slots__ = 'type', '_css_start', '_css_end', 'content', 'line', 'column'

    format_string = '<ContainerToken {0.type} at {0.line}:{0.column}>'

    def __init__(self, type_, css_start, css_end, content, line, column):
        self.type = type_
        self._css_start = css_start
        self._css_end = css_end
        self.content = content
        self.line = line
        self.column = column

    def as_css(self):
        """Return the CSS representation of the token, as parsed in the
        source, as an Unicode string."""
        inner = ''.join(token.as_css() for token in self.content)
        return self._css_start + inner + self._css_end

    def __repr__(self):
        return (self.format_string + ' {0.content}').format(self)
class FunctionToken(ContainerToken):
    """A specialized :class:`ContainerToken` for a ``FUNCTION`` group.

    .. attribute:: function_name

        The unescaped name of the function, with the ``(`` marker removed.
    """
    __slots__ = 'function_name',

    format_string = ('<FunctionToken {0.function_name}() at '
                     '{0.line}:{0.column}>')

    def __init__(self, type_, css_start, css_end, function_name, content,
                 line, column):
        super(FunctionToken, self).__init__(
            type_, css_start, css_end, content, line, column)
        # css_start matched "name(", so drop the trailing ( marker.
        self.function_name = function_name[:-1]
class TokenList(list):
    """A mixed list of :class:`~.token_data.Token` and
    :class:`~.token_data.ContainerToken` objects.

    Subclasses the builtin :class:`~builtins.list` type: iteration,
    indexing and slicing work as usual, plus the additional API below.
    """

    @property
    def line(self):
        """The line number in the CSS source of the first token."""
        return self[0].line

    @property
    def column(self):
        """The column number (inside a source line) of the first token."""
        return self[0].column

    def as_css(self):
        """Return the CSS representation of the tokens, as parsed in the
        source, as an Unicode string."""
        pieces = [token.as_css() for token in self]
        return ''.join(pieces)
def load_c_tokenizer():
    """Initialize and return the C speedup tokenizer extension.

    Hands the compiled token regexps, the unescaping helpers and the
    dispatch tables to the calibre C extension so it can tokenize
    without rebuilding them itself.
    """
    from calibre_extensions import tokenizer
    # Extra token-type names passed positionally to init — presumably the
    # types the C code constructs directly; see the extension for details.
    tokens = list(':;(){}[]') + ['DELIM', 'INTEGER', 'STRING']
    tokenizer.init(
        COMPILED_TOKEN_REGEXPS, UNICODE_UNESCAPE, NEWLINE_UNESCAPE,
        SIMPLE_UNESCAPE, FIND_NEWLINES, TOKEN_DISPATCH, COMPILED_TOKEN_INDEXES,
        *tokens)
    return tokenizer
| 13,353 | Python | .py | 345 | 31.744928 | 79 | 0.593357 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,839 | parsing.py | kovidgoyal_calibre/src/tinycss/parsing.py | # coding: utf8
"""
tinycss.parsing
---------------
Utilities for parsing lists of tokens.
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
# TODO: unit tests
def split_on_comma(tokens):
    """Split a list of tokens on commas, ie ``,`` DELIM tokens.

    Only "top-level" comma tokens are splitting points, not commas inside
    a function or other :class:`ContainerToken`.

    :param tokens:
        An iterable of :class:`~.token_data.Token` or
        :class:`~.token_data.ContainerToken`.
    :returns:
        A list of lists of tokens
    """
    # Start with one (possibly empty) part; each comma opens a new one.
    parts = [[]]
    for tok in tokens:
        if tok.type == 'DELIM' and tok.value == ',':
            parts.append([])
        else:
            parts[-1].append(tok)
    return parts
def strip_whitespace(tokens):
    """Remove whitespace at the beginning and end of a token list.

    Whitespace tokens in-between other tokens in the list are preserved.

    :param tokens:
        A list of :class:`~.token_data.Token` or
        :class:`~.token_data.ContainerToken`.
    :return:
        A new sub-sequence of the list.
    """
    start = 0
    end = len(tokens)
    while start < end and tokens[start].type == 'S':
        start += 1
    while end > start and tokens[end - 1].type == 'S':
        end -= 1
    return tokens[start:end]
def remove_whitespace(tokens):
    """Remove any top-level whitespace in a token list.

    Whitespace tokens inside recursive
    :class:`~.token_data.ContainerToken` are preserved.

    :param tokens:
        A list of :class:`~.token_data.Token` or
        :class:`~.token_data.ContainerToken`.
    :return:
        A new sub-sequence of the list.
    """
    return list(filter(lambda tok: tok.type != 'S', tokens))
def validate_value(tokens):
    """Validate a property value.

    :param tokens:
        an iterable of tokens
    :raises:
        :class:`ParseError` if there is any invalid token for the 'value'
        production of the core grammar.
    """
    for token in tokens:
        # Blocks are validated recursively; everything else must be 'any'.
        if token.type == '{':
            validate_block(token.content, 'property value')
        else:
            validate_any(token, 'property value')
def validate_block(tokens, context):
    """Check tokens against the 'block' production of the core grammar.

    :param tokens: an iterable of tokens
    :param context: a string for the 'unexpected in ...' message
    :raises:
        :class:`ParseError` if there is any invalid token for the 'block'
        production of the core grammar.
    """
    for token in tokens:
        kind = token.type
        if kind == '{':
            validate_block(token.content, context)
        elif kind in (';', 'ATKEYWORD'):
            # Allowed directly inside a block, in addition to 'any'.
            continue
        else:
            validate_any(token, context)
def validate_any(token, context):
    """Check a single token against the 'any' production of the core grammar.

    :param token: a single token
    :param context: a string for the 'unexpected in ...' message
    :raises:
        :class:`ParseError` if this is an invalid token for the
        'any' production of the core grammar.
    """
    kind = token.type
    if kind in ('FUNCTION', '(', '['):
        # Grouping tokens are valid if all their content is.
        for child in token.content:
            validate_any(child, kind)
        return
    if kind in ('S', 'IDENT', 'DIMENSION', 'PERCENTAGE', 'NUMBER',
                'INTEGER', 'URI', 'DELIM', 'STRING', 'HASH', ':',
                'UNICODE-RANGE'):
        return
    adjective = 'unmatched' if kind in ('}', ')', ']') else 'unexpected'
    raise ParseError(token,
                     '{0} {1} token in {2}'.format(adjective, kind, context))
class ParseError(ValueError):
    """Details about a CSS syntax error.  Usually indicates that something
    (a rule or a declaration) was ignored and will not appear as a parsed
    object.

    This exception is typically logged in a list rather than being
    propagated to the user API.

    .. attribute:: line

        Source line where the error occurred.

    .. attribute:: column

        Column in the source line where the error occurred.

    .. attribute:: reason

        What happened (a string).
    """

    def __init__(self, subject, reason):
        self.line = subject.line
        self.column = subject.column
        self.reason = reason
        message = 'Parse error at {0.line}:{0.column}, {0.reason}'.format(self)
        super(ParseError, self).__init__(message)
| 4,461 | Python | .py | 126 | 28.126984 | 77 | 0.609819 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,840 | color3.py | kovidgoyal_calibre/src/tinycss/color3.py | # coding: utf8
"""
tinycss.colors3
---------------
Parser for CSS 3 color values
http://www.w3.org/TR/css3-color/
This module does not provide anything that integrates in a parser class,
only functions that parse single tokens from (eg.) a property value.
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
import collections
import itertools
import re
from .tokenizer import tokenize_grouped
class RGBA(collections.namedtuple('RGBA', ['red', 'green', 'blue', 'alpha'])):
    """An RGBA color.

    A tuple of four floats in the 0..1 range: ``(r, g, b, a)``.
    Also has ``red``, ``green``, ``blue`` and ``alpha`` attributes to access
    the same values.

    Note: values built by :func:`parse_color` keep out-of-range CSS
    channels as-is (eg. ``rgb(-51, 306, 0)``); only alpha is clipped.
    """
def parse_color_string(css_string):
    """Parse a CSS string as a color value.

    This is a convenience wrapper around :func:`parse_color` in case you
    have a string that is not from a CSS stylesheet.

    :param css_string:
        A unicode string in CSS syntax.
    :returns:
        Same as :func:`parse_color`.
    """
    tokens = list(tokenize_grouped(css_string.strip()))
    if len(tokens) != 1:
        # More (or less) than a single token: not a valid color value.
        return None
    return parse_color(tokens[0])
def parse_color(token):
    """Parse a single token as a color value.

    :param token:
        A single :class:`~.token_data.Token` or
        :class:`~.token_data.ContainerToken`, as found eg. in a
        property value.
    :returns:
        * ``None``, if the token is not a valid CSS 3 color value.
          (No exception is raised.)
        * For the *currentColor* keyword: the string ``'currentColor'``
        * Every other value (including keywords, HSL and HSLA) is
          converted to RGBA and returned as an :class:`RGBA` object
          (a 4-tuple with attribute access).
          The alpha channel is clipped to [0, 1], but R, G, or B can be
          out of range (eg. ``rgb(-51, 306, 0)`` is represented as
          ``(-.2, 1.2, 0, 1)``.)
    """
    kind = token.type
    if kind == 'IDENT':
        return COLOR_KEYWORDS.get(token.value.lower())
    if kind == 'HASH':
        # Try the short (#rgb) then long (#rrggbb) hexadecimal forms.
        for multiplier, matcher in HASH_REGEXPS:
            match = matcher(token.value)
            if match is None:
                continue
            channels = [int(group * multiplier, 16) / 255
                        for group in match.groups()]
            return RGBA(channels[0], channels[1], channels[2], 1.)
        return None
    if kind != 'FUNCTION':
        return None
    args = parse_comma_separated(token.content)
    if not args:
        return None
    name = token.function_name.lower()
    if name == 'rgb':
        return parse_rgb(args, alpha=1.)
    if name == 'rgba':
        alpha = parse_alpha(args[3:])
        if alpha is not None:
            return parse_rgb(args[:3], alpha)
        return None
    if name == 'hsl':
        return parse_hsl(args, alpha=1.)
    if name == 'hsla':
        alpha = parse_alpha(args[3:])
        if alpha is not None:
            return parse_hsl(args[:3], alpha)
    return None
def parse_alpha(args):
    """Parse the alpha channel of an ``rgba()``/``hsla()`` argument tail.

    If *args* is a list of a single INTEGER or NUMBER token,
    return its value clipped to the 0..1 range.
    Otherwise, return None.
    """
    if len(args) != 1:
        return None
    token = args[0]
    if token.type in ('NUMBER', 'INTEGER'):
        return max(0, min(1, token.value))
    return None
def parse_rgb(args, alpha):
    """Parse the R, G, B arguments of an ``rgb()``/``rgba()`` function.

    If *args* is a list of 3 INTEGER tokens or 3 PERCENTAGE tokens,
    return RGB values as a tuple of 3 floats in 0..1.
    Otherwise, return None.
    """
    kinds = [token.type for token in args]
    if kinds == ['INTEGER'] * 3:
        divisor = 255
    elif kinds == ['PERCENTAGE'] * 3:
        divisor = 100
    else:
        return None
    channels = [token.value / divisor for token in args[:3]]
    return RGBA(channels[0], channels[1], channels[2], alpha)
def parse_hsl(args, alpha):
    """Parse the channel arguments of an ``hsl()``/``hsla()`` function.

    :param args:
        The channel tokens: 1 INTEGER (hue, in degrees) followed by
        2 PERCENTAGE tokens (saturation and lightness).
    :param alpha:
        The alpha channel, as a float in 0..1.
    :returns:
        An :class:`RGBA` value converted to RGB, or ``None`` if the
        arguments do not match the expected form.
    """
    if [arg.type for arg in args] != ['INTEGER', 'PERCENTAGE', 'PERCENTAGE']:
        return None
    hue, saturation, lightness = (arg.value for arg in args[:3])
    red, green, blue = hsl_to_rgb(hue, saturation, lightness)
    return RGBA(red, green, blue, alpha)
def hsl_to_rgb(hue, saturation, lightness):
    """Convert an HSL color to RGB.

    :param hue: hue angle in degrees (any value; wrapped modulo 360)
    :param saturation: percentage, clipped to 0..100
    :param lightness: percentage, clipped to 0..100
    :returns: ``(r, g, b)`` as floats in the 0..1 range
    """
    # Normalize: hue to a fraction of a turn, S and L to 0..1.
    hue = (hue / 360) % 1
    saturation = min(1, max(0, saturation / 100))
    lightness = min(1, max(0, lightness / 100))

    # Translated from the ABC pseudo-code at
    # http://www.w3.org/TR/css3-color/#hsl-color
    if lightness <= 0.5:
        m2 = lightness * (saturation + 1)
    else:
        m2 = lightness + saturation - lightness * saturation
    m1 = lightness * 2 - m2

    def channel(h):
        # Wrap h back into the 0..1 range (inputs are hue +/- 1/3).
        if h < 0:
            h += 1
        if h > 1:
            h -= 1
        if h * 6 < 1:
            return m1 + (m2 - m1) * h * 6
        if h * 2 < 1:
            return m2
        if h * 3 < 2:
            return m1 + (m2 - m1) * (2 / 3 - h) * 6
        return m1

    return channel(hue + 1 / 3), channel(hue), channel(hue - 1 / 3)
def parse_comma_separated(tokens):
    """Split function-argument tokens on commas.

    :param tokens:
        A list of tokens, typically the content of a function token.
        Arguments must be single tokens separated by mandatory commas,
        with optional white space around each argument.
    :returns:
        The list of argument tokens (commas and white space removed),
        or ``None`` if the tokens do not match the description above.
    """
    significant = [tok for tok in tokens if tok.type != 'S']
    if not significant:
        return []
    # A valid list alternates argument, comma, argument, ... so it has an
    # odd length and every token at an odd index is a ',' DELIM.
    if len(significant) % 2 == 0:
        return None
    for separator in significant[1::2]:
        if separator.type != 'DELIM' or separator.value != ',':
            return None
    return significant[::2]
# Matchers for hexadecimal color notations, as (multiplier, matcher) pairs.
# The multiplier is how many times each captured digit group is repeated
# before being parsed as a 0..255 channel: '#rgb' digits are doubled
# (eg. 'f' -> 'ff'), '#rrggbb' digits are used as-is.
HASH_REGEXPS = (
    (2, re.compile(r'^#([\da-f])([\da-f])([\da-f])$', re.I).match),
    (1, re.compile(r'^#([\da-f]{2})([\da-f]{2})([\da-f]{2})$', re.I).match),
)
# Basic color keywords (CSS 2.1); (r, g, b) in 0..255
BASIC_COLOR_KEYWORDS = [
    ('black', (0, 0, 0)),
    ('silver', (192, 192, 192)),
    ('gray', (128, 128, 128)),
    ('white', (255, 255, 255)),
    ('maroon', (128, 0, 0)),
    ('red', (255, 0, 0)),
    ('purple', (128, 0, 128)),
    ('fuchsia', (255, 0, 255)),
    ('green', (0, 128, 0)),
    ('lime', (0, 255, 0)),
    ('olive', (128, 128, 0)),
    ('yellow', (255, 255, 0)),
    ('navy', (0, 0, 128)),
    ('blue', (0, 0, 255)),
    ('teal', (0, 128, 128)),
    ('aqua', (0, 255, 255)),
]
# Extended (X11/SVG) color keywords from CSS 3; (r, g, b) in 0..255
EXTENDED_COLOR_KEYWORDS = [
    ('aliceblue', (240, 248, 255)),
    ('antiquewhite', (250, 235, 215)),
    ('aqua', (0, 255, 255)),
    ('aquamarine', (127, 255, 212)),
    ('azure', (240, 255, 255)),
    ('beige', (245, 245, 220)),
    ('bisque', (255, 228, 196)),
    ('black', (0, 0, 0)),
    ('blanchedalmond', (255, 235, 205)),
    ('blue', (0, 0, 255)),
    ('blueviolet', (138, 43, 226)),
    ('brown', (165, 42, 42)),
    ('burlywood', (222, 184, 135)),
    ('cadetblue', (95, 158, 160)),
    ('chartreuse', (127, 255, 0)),
    ('chocolate', (210, 105, 30)),
    ('coral', (255, 127, 80)),
    ('cornflowerblue', (100, 149, 237)),
    ('cornsilk', (255, 248, 220)),
    ('crimson', (220, 20, 60)),
    ('cyan', (0, 255, 255)),
    ('darkblue', (0, 0, 139)),
    ('darkcyan', (0, 139, 139)),
    ('darkgoldenrod', (184, 134, 11)),
    ('darkgray', (169, 169, 169)),
    ('darkgreen', (0, 100, 0)),
    ('darkgrey', (169, 169, 169)),
    ('darkkhaki', (189, 183, 107)),
    ('darkmagenta', (139, 0, 139)),
    ('darkolivegreen', (85, 107, 47)),
    ('darkorange', (255, 140, 0)),
    ('darkorchid', (153, 50, 204)),
    ('darkred', (139, 0, 0)),
    ('darksalmon', (233, 150, 122)),
    ('darkseagreen', (143, 188, 143)),
    ('darkslateblue', (72, 61, 139)),
    ('darkslategray', (47, 79, 79)),
    ('darkslategrey', (47, 79, 79)),
    ('darkturquoise', (0, 206, 209)),
    ('darkviolet', (148, 0, 211)),
    ('deeppink', (255, 20, 147)),
    ('deepskyblue', (0, 191, 255)),
    ('dimgray', (105, 105, 105)),
    ('dimgrey', (105, 105, 105)),
    ('dodgerblue', (30, 144, 255)),
    ('firebrick', (178, 34, 34)),
    ('floralwhite', (255, 250, 240)),
    ('forestgreen', (34, 139, 34)),
    ('fuchsia', (255, 0, 255)),
    ('gainsboro', (220, 220, 220)),
    ('ghostwhite', (248, 248, 255)),
    ('gold', (255, 215, 0)),
    ('goldenrod', (218, 165, 32)),
    ('gray', (128, 128, 128)),
    ('green', (0, 128, 0)),
    ('greenyellow', (173, 255, 47)),
    ('grey', (128, 128, 128)),
    ('honeydew', (240, 255, 240)),
    ('hotpink', (255, 105, 180)),
    ('indianred', (205, 92, 92)),
    ('indigo', (75, 0, 130)),
    ('ivory', (255, 255, 240)),
    ('khaki', (240, 230, 140)),
    ('lavender', (230, 230, 250)),
    ('lavenderblush', (255, 240, 245)),
    ('lawngreen', (124, 252, 0)),
    ('lemonchiffon', (255, 250, 205)),
    ('lightblue', (173, 216, 230)),
    ('lightcoral', (240, 128, 128)),
    ('lightcyan', (224, 255, 255)),
    ('lightgoldenrodyellow', (250, 250, 210)),
    ('lightgray', (211, 211, 211)),
    ('lightgreen', (144, 238, 144)),
    ('lightgrey', (211, 211, 211)),
    ('lightpink', (255, 182, 193)),
    ('lightsalmon', (255, 160, 122)),
    ('lightseagreen', (32, 178, 170)),
    ('lightskyblue', (135, 206, 250)),
    ('lightslategray', (119, 136, 153)),
    ('lightslategrey', (119, 136, 153)),
    ('lightsteelblue', (176, 196, 222)),
    ('lightyellow', (255, 255, 224)),
    ('lime', (0, 255, 0)),
    ('limegreen', (50, 205, 50)),
    ('linen', (250, 240, 230)),
    ('magenta', (255, 0, 255)),
    ('maroon', (128, 0, 0)),
    ('mediumaquamarine', (102, 205, 170)),
    ('mediumblue', (0, 0, 205)),
    ('mediumorchid', (186, 85, 211)),
    ('mediumpurple', (147, 112, 219)),
    ('mediumseagreen', (60, 179, 113)),
    ('mediumslateblue', (123, 104, 238)),
    ('mediumspringgreen', (0, 250, 154)),
    ('mediumturquoise', (72, 209, 204)),
    ('mediumvioletred', (199, 21, 133)),
    ('midnightblue', (25, 25, 112)),
    ('mintcream', (245, 255, 250)),
    ('mistyrose', (255, 228, 225)),
    ('moccasin', (255, 228, 181)),
    ('navajowhite', (255, 222, 173)),
    ('navy', (0, 0, 128)),
    ('oldlace', (253, 245, 230)),
    ('olive', (128, 128, 0)),
    ('olivedrab', (107, 142, 35)),
    ('orange', (255, 165, 0)),
    ('orangered', (255, 69, 0)),
    ('orchid', (218, 112, 214)),
    ('palegoldenrod', (238, 232, 170)),
    ('palegreen', (152, 251, 152)),
    ('paleturquoise', (175, 238, 238)),
    ('palevioletred', (219, 112, 147)),
    ('papayawhip', (255, 239, 213)),
    ('peachpuff', (255, 218, 185)),
    ('peru', (205, 133, 63)),
    ('pink', (255, 192, 203)),
    ('plum', (221, 160, 221)),
    ('powderblue', (176, 224, 230)),
    ('purple', (128, 0, 128)),
    ('red', (255, 0, 0)),
    ('rosybrown', (188, 143, 143)),
    ('royalblue', (65, 105, 225)),
    ('saddlebrown', (139, 69, 19)),
    ('salmon', (250, 128, 114)),
    ('sandybrown', (244, 164, 96)),
    ('seagreen', (46, 139, 87)),
    ('seashell', (255, 245, 238)),
    ('sienna', (160, 82, 45)),
    ('silver', (192, 192, 192)),
    ('skyblue', (135, 206, 235)),
    ('slateblue', (106, 90, 205)),
    ('slategray', (112, 128, 144)),
    ('slategrey', (112, 128, 144)),
    ('snow', (255, 250, 250)),
    ('springgreen', (0, 255, 127)),
    ('steelblue', (70, 130, 180)),
    ('tan', (210, 180, 140)),
    ('teal', (0, 128, 128)),
    ('thistle', (216, 191, 216)),
    ('tomato', (255, 99, 71)),
    ('turquoise', (64, 224, 208)),
    ('violet', (238, 130, 238)),
    ('wheat', (245, 222, 179)),
    ('white', (255, 255, 255)),
    ('whitesmoke', (245, 245, 245)),
    ('yellow', (255, 255, 0)),
    ('yellowgreen', (154, 205, 50)),
]
# Special keywords: value is an RGBA of (r, g, b, a) in 0..1,
# or the string marker 'currentColor'.
SPECIAL_COLOR_KEYWORDS = {
    'currentcolor': 'currentColor',
    'transparent': RGBA(0., 0., 0., 0.),
}
# The complete lookup table used by parse_color() for IDENT tokens:
# RGBA namedtuples of (r, g, b, a) in 0..1 or a string marker.
COLOR_KEYWORDS = SPECIAL_COLOR_KEYWORDS.copy()
COLOR_KEYWORDS.update(
    # 255 maps to 1, 0 to 0, the rest is linear.
    (keyword, RGBA(r / 255., g / 255., b / 255., 1.))
    for keyword, (r, g, b) in itertools.chain(
        BASIC_COLOR_KEYWORDS, EXTENDED_COLOR_KEYWORDS))
| 12,242 | Python | .py | 339 | 30.032448 | 78 | 0.540388 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,841 | media3.py | kovidgoyal_calibre/src/tinycss/media3.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from polyglot.builtins import error_message
from tinycss.css21 import CSS21Parser
from tinycss.parsing import ParseError, remove_whitespace, split_on_comma
class MediaQuery:
    """A single CSS 3 media query.

    ``negated`` is True for ``not ...`` queries; ``expressions`` is a
    sequence of ``(media_feature, value)`` pairs.
    """

    __slots__ = 'media_type', 'expressions', 'negated'

    def __init__(self, media_type='all', expressions=(), negated=False):
        self.media_type = media_type
        self.expressions = expressions
        self.negated = negated

    def __eq__(self, other):
        # getattr() with a None default makes comparison against
        # arbitrary objects return False instead of raising.
        return (
            self.media_type == getattr(other, 'media_type', None) and
            self.negated == getattr(other, 'negated', None) and
            self.expressions == getattr(other, 'expressions', None))

    def __repr__(self):
        return '<MediaQuery type=%s negated=%s expressions=%s>' % (
            self.media_type, self.negated, self.expressions)
class MalformedExpression(Exception):
    """Internal exception raised for an invalid media expression.

    Carries the offending token so the caller can build a positioned
    parse error from it.
    """

    def __init__(self, tok, msg):
        super().__init__(msg)
        self.tok = tok
class CSSMedia3Parser(CSS21Parser):
    ''' Parse media queries as defined by the CSS 3 media module '''
    def parse_media(self, tokens, errors):
        # Overrides CSS21Parser.parse_media: returns a list of MediaQuery
        # objects instead of plain media-type strings. A malformed query
        # is replaced by a "not all" query (the CSS error-handling rule)
        # and the parse error is appended to `errors`.
        if not tokens:
            # An empty media list applies everywhere.
            return [MediaQuery('all')]
        queries = []
        for part in split_on_comma(remove_whitespace(tokens)):
            negated = False
            media_type = None
            expressions = []
            try:
                for i, tok in enumerate(part):
                    if i == 0 and tok.type == 'IDENT':
                        val = tok.value.lower()
                        if val == 'only':
                            continue  # ignore leading ONLY
                        if val == 'not':
                            negated = True
                            continue
                    if media_type is None and tok.type == 'IDENT':
                        media_type = tok.value
                        continue
                    elif media_type is None:
                        # No explicit media type before the first
                        # expression: default to 'all'.
                        media_type = 'all'
                    if tok.type == 'IDENT' and tok.value.lower() == 'and':
                        continue  # the AND combinator between expressions
                    # From here on, tok must be a parenthesized expression.
                    if not tok.is_container:
                        raise MalformedExpression(tok, 'expected a media expression not a %s' % tok.type)
                    if tok.type != '(':
                        raise MalformedExpression(tok, 'media expressions must be in parentheses not %s' % tok.type)
                    content = remove_whitespace(tok.content)
                    if len(content) == 0:
                        raise MalformedExpression(tok, 'media expressions cannot be empty')
                    if content[0].type != 'IDENT':
                        raise MalformedExpression(content[0], 'expected a media feature not a %s' % tok.type)
                    media_feature, expr = content[0].value, None
                    if len(content) > 1:
                        # A (feature: value) expression; content[1] must be
                        # the ':' delimiter, content[2:] the value tokens.
                        if len(content) < 3:
                            raise MalformedExpression(content[1], 'malformed media feature definition')
                        if content[1].type != ':':
                            raise MalformedExpression(content[1], 'expected a :')
                        expr = content[2:]
                        if len(expr) == 1:
                            expr = expr[0]
                        elif len(expr) == 3 and (expr[0].type, expr[1].type, expr[1].value, expr[2].type) == (
                            'INTEGER', 'DELIM', '/', 'INTEGER'):
                            # This should really be moved into token_data, but
                            # since RATIO is not part of CSS 2.1 and does not
                            # occur anywhere else, we special case it here.
                            # Note: the first token is mutated in place.
                            r = expr[0]
                            r.value = (expr[0].value, expr[2].value)
                            r.type = 'RATIO'
                            r._as_css = expr[0]._as_css + expr[1]._as_css + expr[2]._as_css
                            expr = r
                        else:
                            raise MalformedExpression(expr[0], 'malformed media feature definition')
                    expressions.append((media_feature, expr))
            except MalformedExpression as err:
                errors.append(ParseError(err.tok, error_message(err)))
                # Error recovery: replace the whole query with "not all".
                media_type, negated, expressions = 'all', True, ()
            queries.append(MediaQuery(media_type or 'all', expressions=tuple(expressions), negated=negated))
        return queries
| 4,582 | Python | .py | 86 | 36 | 116 | 0.50983 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,842 | decoding.py | kovidgoyal_calibre/src/tinycss/decoding.py | # coding: utf8
"""
tinycss.decoding
----------------
Decoding stylesheets from bytes to Unicode.
http://www.w3.org/TR/CSS21/syndata.html#charset
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
import operator
import re
from polyglot.binary import from_hex_bytes
__all__ = ['decode'] # Everything else is implementation detail
def decode(css_bytes, protocol_encoding=None,
           linking_encoding=None, document_encoding=None):
    """
    Determine the character encoding from the passed metadata and the
    ``@charset`` rule in the stylesheet (if any); and decode accordingly.
    If no encoding information is available or decoding fails,
    decoding defaults to UTF-8 and then fall back on ISO-8859-1.

    :param css_bytes:
        a CSS stylesheet as a byte string
    :param protocol_encoding:
        The "charset" parameter of a "Content-Type" HTTP header (if any),
        or similar metadata for other protocols.
    :param linking_encoding:
        ``<link charset="">`` or other metadata from the linking mechanism
        (if any)
    :param document_encoding:
        Encoding of the referring style sheet or document (if any)
    :return:
        A tuple of a Unicode string, with any BOM removed, and the
        encoding that was used.

    """
    # Priority 1: encoding from the transport protocol.
    if protocol_encoding:
        css_unicode = try_encoding(css_bytes, protocol_encoding)
        if css_unicode is not None:
            return css_unicode, protocol_encoding
    # Priority 2: BOMs and @charset rules, recognized by their byte
    # patterns in various candidate encodings.
    for encoding, pattern in ENCODING_MAGIC_NUMBERS:
        match = pattern(css_bytes)
        if match:
            has_at_charset = isinstance(encoding, tuple)
            if has_at_charset:
                # The encoding name comes from the @charset rule itself;
                # `extract` is an itemgetter that drops the zero padding
                # bytes of UTF-16/32 encoded names (see Slicer below).
                extract, endianness = encoding
                encoding = extract(match.group(1))
                # Get an ASCII-only unicode value.
                # This is the only thing that works on both Python 2 and 3
                # for bytes.decode()
                # Non-ASCII encoding names are invalid anyway,
                # but make sure they stay invalid.
                encoding = encoding.decode('ascii', 'replace')
                encoding = encoding.replace('\ufffd', '?')
                if encoding.replace('-', '').replace('_', '').lower() in [
                        'utf16', 'utf32']:
                    # Bare "UTF-16"/"UTF-32" names need the endianness
                    # implied by the matched byte pattern.
                    encoding += endianness
                encoding = encoding.encode('ascii', 'replace').decode('ascii')
            css_unicode = try_encoding(css_bytes, encoding)
            if css_unicode and not (has_at_charset and not
                                    css_unicode.startswith('@charset "')):
                # Reject the result if decoding destroyed the @charset
                # rule that declared the encoding in the first place.
                return css_unicode, encoding
            break
    # Priority 3: metadata from the linking mechanism or the referrer.
    for encoding in [linking_encoding, document_encoding]:
        if encoding:
            css_unicode = try_encoding(css_bytes, encoding)
            if css_unicode is not None:
                return css_unicode, encoding
    # Priority 4: UTF-8, then ISO-8859-1 (which never fails to decode).
    css_unicode = try_encoding(css_bytes, 'UTF-8')
    if css_unicode is not None:
        return css_unicode, 'UTF-8'
    return try_encoding(css_bytes, 'ISO-8859-1', fallback=False), 'ISO-8859-1'
def try_encoding(css_bytes, encoding, fallback=True):
    """Decode ``css_bytes`` with ``encoding``, stripping any leading BOM.

    :param css_bytes: the raw stylesheet bytes
    :param encoding: an encoding name
    :param fallback:
        When true, return ``None`` on failure instead of raising.
    :returns:
        The decoded string without a leading Byte Order Mark, or ``None``
        if ``fallback`` is true and decoding failed.
    """
    if fallback:
        try:
            decoded = css_bytes.decode(encoding)
        # LookupError means unknown encoding name
        except (UnicodeDecodeError, LookupError):
            return None
    else:
        decoded = css_bytes.decode(encoding)
    if decoded.startswith('\ufeff'):
        # Remove the Byte Order Mark
        decoded = decoded[1:]
    return decoded
def hex2re(hex_data):
    """Turn a space-separated hex dump like ``'EF BB BF'`` into a
    regex-escaped bytes pattern matching exactly those bytes."""
    packed = from_hex_bytes(hex_data.replace(' ', '').encode('ascii'))
    return re.escape(packed)
class Slicer:
    """``Slice[start:stop:step]`` builds an :func:`operator.itemgetter`
    for that slice, ie. a callable returning ``seq[start:stop:step]``.

    Used below to extract the encoding name bytes out of UTF-16/32
    encoded ``@charset`` rules, skipping the zero padding bytes.
    """

    def __getitem__(self, slice_):
        return operator.itemgetter(slice_)


Slice = Slicer()
# List of (encoding, pattern) pairs, tried in order against the start of
# the raw stylesheet bytes.
# `encoding` is either an encoding name as a string, or a
# (slice_, endianness) pair for "as specified" @charset rules: `slice_`
# is an itemgetter (see Slicer above) that extracts the encoding name
# from the matched bytes, dropping any UTF-16/32 zero padding bytes, and
# `endianness` ('-BE', '-LE' or '') is appended to bare UTF-16/32 names.
ENCODING_MAGIC_NUMBERS = [
    # UTF-8 BOM followed by: @charset "..."; (name as specified)
    ((Slice[:], ''), re.compile(
        hex2re('EF BB BF 40 63 68 61 72 73 65 74 20 22')
        + b'([^\x22]*?)'
        + hex2re('22 3B')).match),
    # Plain UTF-8 BOM
    ('UTF-8', re.compile(
        hex2re('EF BB BF')).match),
    # ASCII-compatible @charset "...";
    ((Slice[:], ''), re.compile(
        hex2re('40 63 68 61 72 73 65 74 20 22')
        + b'([^\x22]*?)'
        + hex2re('22 3B')).match),
    # UTF-16-BE, with and without BOM
    ((Slice[1::2], '-BE'), re.compile(
        hex2re('FE FF 00 40 00 63 00 68 00 61 00 72 00 73 00 65 00'
               '74 00 20 00 22')
        + b'((\x00[^\x22])*?)'
        + hex2re('00 22 00 3B')).match),
    ((Slice[1::2], '-BE'), re.compile(
        hex2re('00 40 00 63 00 68 00 61 00 72 00 73 00 65 00 74 00'
               '20 00 22')
        + b'((\x00[^\x22])*?)'
        + hex2re('00 22 00 3B')).match),
    # UTF-16-LE, with and without BOM
    ((Slice[::2], '-LE'), re.compile(
        hex2re('FF FE 40 00 63 00 68 00 61 00 72 00 73 00 65 00 74'
               '00 20 00 22 00')
        + b'(([^\x22]\x00)*?)'
        + hex2re('22 00 3B 00')).match),
    ((Slice[::2], '-LE'), re.compile(
        hex2re('40 00 63 00 68 00 61 00 72 00 73 00 65 00 74 00 20'
               '00 22 00')
        + b'(([^\x22]\x00)*?)'
        + hex2re('22 00 3B 00')).match),
    # UTF-32-BE, with and without BOM
    ((Slice[3::4], '-BE'), re.compile(
        hex2re('00 00 FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00'
               '00 00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00'
               '00 74 00 00 00 20 00 00 00 22')
        + b'((\x00\x00\x00[^\x22])*?)'
        + hex2re('00 00 00 22 00 00 00 3B')).match),
    ((Slice[3::4], '-BE'), re.compile(
        hex2re('00 00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00'
               '00 00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00'
               '00 20 00 00 00 22')
        + b'((\x00\x00\x00[^\x22])*?)'
        + hex2re('00 00 00 22 00 00 00 3B')).match),
    # Python does not support 2143 or 3412 endianness, AFAIK.
    # I guess we could fix it up ourselves but meh. Patches welcome.
    # ((Slice[2::4], '-2143'), re.compile(
    #     hex2re('00 00 FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00'
    #            '00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00'
    #            '74 00 00 00 20 00 00 00 22 00')
    #     + b'((\x00\x00[^\x22]\x00)*?)'
    #     + hex2re('00 00 22 00 00 00 3B 00')).match),
    # ((Slice[2::4], '-2143'), re.compile(
    #     hex2re('00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00'
    #            '00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00'
    #            '20 00 00 00 22 00')
    #     + b'((\x00\x00[^\x22]\x00)*?)'
    #     + hex2re('00 00 22 00 00 00 3B 00')).match),
    # ((Slice[1::4], '-3412'), re.compile(
    #     hex2re('FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00 00 00'
    #            '61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74'
    #            '00 00 00 20 00 00 00 22 00 00')
    #     + b'((\x00[^\x22]\x00\x00)*?)'
    #     + hex2re('00 22 00 00 00 3B 00 00')).match),
    # ((Slice[1::4], '-3412'), re.compile(
    #     hex2re('00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00'
    #            '72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20'
    #            '00 00 00 22 00 00')
    #     + b'((\x00[^\x22]\x00\x00)*?)'
    #     + hex2re('00 22 00 00 00 3B 00 00')).match),
    # UTF-32-LE, with and without BOM
    ((Slice[::4], '-LE'), re.compile(
        hex2re('FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00 00 61'
               '00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74 00'
               '00 00 20 00 00 00 22 00 00 00')
        + b'(([^\x22]\x00\x00\x00)*?)'
        + hex2re('22 00 00 00 3B 00 00 00')).match),
    ((Slice[::4], '-LE'), re.compile(
        hex2re('40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00 72'
               '00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20 00'
               '00 00 22 00 00 00')
        + b'(([^\x22]\x00\x00\x00)*?)'
        + hex2re('22 00 00 00 3B 00 00 00')).match),
    # Bare BOMs, longest first (UTF-32 BOMs start with UTF-16 BOM bytes)
    ('UTF-32-BE', re.compile(
        hex2re('00 00 FE FF')).match),
    ('UTF-32-LE', re.compile(
        hex2re('FF FE 00 00')).match),
    # ('UTF-32-2143', re.compile(
    #     hex2re('00 00 FF FE')).match),
    # ('UTF-32-3412', re.compile(
    #     hex2re('FE FF 00 00')).match),
    ('UTF-16-BE', re.compile(
        hex2re('FE FF')).match),
    ('UTF-16-LE', re.compile(
        hex2re('FF FE')).match),
    # Some of there are supported by Python, but I didn’t bother.
    # You know the story with patches ...
    # # as specified, transcoded from EBCDIC to ASCII
    # ('as_specified-EBCDIC', re.compile(
    #     hex2re('7C 83 88 81 99 A2 85 A3 40 7F')
    #     + b'([^\x7F]*?)'
    #     + hex2re('7F 5E')).match),
    # # as specified, transcoded from IBM1026 to ASCII
    # ('as_specified-IBM1026', re.compile(
    #     hex2re('AE 83 88 81 99 A2 85 A3 40 FC')
    #     + b'([^\xFC]*?)'
    #     + hex2re('FC 5E')).match),
    # # as specified, transcoded from GSM 03.38 to ASCII
    # ('as_specified-GSM_03.38', re.compile(
    #     hex2re('00 63 68 61 72 73 65 74 20 22')
    #     + b'([^\x22]*?)'
    #     + hex2re('22 3B')).match),
]
| 9,085 | Python | .py | 205 | 37.097561 | 79 | 0.557858 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,843 | css21.py | kovidgoyal_calibre/src/tinycss/css21.py | # coding: utf8
"""
tinycss.css21
-------------
Parser for CSS 2.1
http://www.w3.org/TR/CSS21/syndata.html
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
from itertools import chain, islice
from tinycss.decoding import decode
from tinycss.parsing import ParseError, remove_whitespace, split_on_comma, strip_whitespace, validate_any, validate_value
from tinycss.token_data import TokenList
from tinycss.tokenizer import tokenize_grouped
# stylesheet : [ CDO | CDC | S | statement ]*;
# statement : ruleset | at-rule;
# at-rule : ATKEYWORD S* any* [ block | ';' S* ];
# block : '{' S* [ any | block | ATKEYWORD S* | ';' S* ]* '}' S*;
# ruleset : selector? '{' S* declaration? [ ';' S* declaration? ]* '}' S*;
# selector : any+;
# declaration : property S* ':' S* value;
# property : IDENT;
# value : [ any | block | ATKEYWORD S* ]+;
# any : [ IDENT | NUMBER | PERCENTAGE | DIMENSION | STRING
# | DELIM | URI | HASH | UNICODE-RANGE | INCLUDES
# | DASHMATCH | ':' | FUNCTION S* [any|unused]* ')'
# | '(' S* [any|unused]* ')' | '[' S* [any|unused]* ']'
# ] S*;
# unused : block | ATKEYWORD S* | ';' S* | CDO S* | CDC S*;
class Stylesheet:
    """A parsed CSS stylesheet.

    .. attribute:: rules

        A mixed list, in source order, of :class:`RuleSet` and various
        at-rules such as :class:`ImportRule`, :class:`MediaRule` and
        :class:`PageRule`. Use their :obj:`at_keyword` attribute to
        distinguish them.

    .. attribute:: errors

        A list of :class:`~.parsing.ParseError`. Invalid rules and
        declarations are ignored, with the details logged in this list.

    .. attribute:: encoding

        The character encoding that was used to decode the stylesheet
        from bytes, or ``None`` for Unicode stylesheets.

    """

    def __init__(self, rules, errors, encoding):
        self.rules = rules
        self.errors = errors
        self.encoding = encoding

    def __repr__(self):
        return '<{} {} rules {} errors>'.format(
            type(self).__name__, len(self.rules), len(self.errors))
class AtRule:
    """An unparsed at-rule.

    .. attribute:: at_keyword

        The normalized (lower-case) at-keyword as a string. Eg: ``'@page'``

    .. attribute:: head

        The part of the at-rule between the at-keyword and the ``{``
        marking the body, or the ``;`` marking the end of an at-rule
        without a body. A :class:`~.token_data.TokenList`.

    .. attribute:: body

        The content of the body between ``{`` and ``}`` as a
        :class:`~.token_data.TokenList`, or ``None`` if there is no body
        (ie. if the rule ends with ``;``).

    The head was validated against the core grammar but **not** the body,
    as the body might contain declarations. In case of an error in a
    declaration, parsing should continue from the next declaration.
    The whole rule should not be ignored as it would be for an error
    in the head.

    These at-rules are expected to be parsed further before reaching
    the user API.
    """

    __slots__ = 'at_keyword', 'head', 'body', 'line', 'column'

    def __init__(self, at_keyword, head, body, line, column):
        self.at_keyword = at_keyword
        self.head = TokenList(head)
        self.body = None if body is None else TokenList(body)
        self.line = line
        self.column = column

    def __repr__(self):
        return '<{} {}:{} {}>'.format(
            type(self).__name__, self.line, self.column, self.at_keyword)
class RuleSet:
    """A ruleset: a selector group followed by a declaration block.

    .. attribute:: at_keyword

        Always ``None``. Helps to tell rulesets apart from at-rules.

    .. attribute:: selector

        The selector as a :class:`~.token_data.TokenList`.
        In CSS 3, this is actually called a selector group.
        ``rule.selector.as_css()`` gives the selector as a string;
        that string can be used with *cssselect*, see :ref:`selectors3`.

    .. attribute:: declarations

        The list of :class:`Declaration`, in source order.

    """

    at_keyword = None
    __slots__ = 'selector', 'declarations', 'line', 'column'

    def __init__(self, selector, declarations, line, column):
        self.selector = TokenList(selector)
        self.declarations = declarations
        self.line = line
        self.column = column

    def __repr__(self):
        return '<{} at {}:{} {}>'.format(
            type(self).__name__, self.line, self.column,
            self.selector.as_css())
class Declaration:
    """A property declaration, eg. ``color: red !important``.

    .. attribute:: name

        The property name as a normalized (lower-case) string.

    .. attribute:: value

        The property value as a :class:`~.token_data.TokenList`.
        The value is not parsed: UAs using tinycss may only support some
        properties or some values, so they need to parse values
        themselves, ignore declarations with unknown or unsupported
        properties or values, and fall back on any previous declaration.
        :mod:`tinycss.color3` parses color values, but other values will
        need specific parsing/validation code.

    .. attribute:: priority

        Either the string ``'important'`` or ``None``.

    """

    __slots__ = 'name', 'value', 'priority', 'line', 'column'

    def __init__(self, name, value, priority, line, column):
        self.name = name
        self.value = TokenList(value)
        self.priority = priority
        self.line = line
        self.column = column

    def __repr__(self):
        bang = ' !' + self.priority if self.priority else ''
        return '<{} {}:{} {}: {}{}>'.format(
            type(self).__name__, self.line, self.column,
            self.name, self.value.as_css(), bang)
class PageRule:
    """A parsed CSS 2.1 @page rule.

    .. attribute:: at_keyword

        Always ``'@page'``

    .. attribute:: selector

        The page selector. In CSS 2.1 this is either ``None`` (no
        selector), or the string ``'first'``, ``'left'`` or ``'right'``
        for the pseudo class of the same name.

    .. attribute:: specificity

        Specificity of the page selector. This is a tuple of four
        integers, but these tuples are mostly meant to be compared
        to each other.

    .. attribute:: declarations

        A list of :class:`Declaration`, in source order.

    .. attribute:: at_rules

        The list of parsed at-rules inside the @page block, in source
        order. Always empty for CSS 2.1.

    """

    at_keyword = '@page'
    __slots__ = 'selector', 'specificity', 'declarations', 'at_rules', 'line', 'column'

    def __init__(self, selector, specificity, declarations, at_rules,
                 line, column):
        self.selector = selector
        self.specificity = specificity
        self.declarations = declarations
        self.at_rules = at_rules
        self.line = line
        self.column = column

    def __repr__(self):
        return '<{} {}:{} {}>'.format(
            type(self).__name__, self.line, self.column, self.selector)
class MediaRule:
    """A parsed @media rule.

    .. attribute:: at_keyword

        Always ``'@media'``

    .. attribute:: media

        For CSS 2.1 without media queries: the media types as a list
        of strings.

    .. attribute:: rules

        The list of :class:`RuleSet` and various at-rules inside the
        @media block, in source order.

    """

    at_keyword = '@media'
    __slots__ = 'media', 'rules', 'line', 'column'

    def __init__(self, media, rules, line, column):
        self.media = media
        self.rules = rules
        self.line = line
        self.column = column

    def __repr__(self):
        return '<{} {}:{} {}>'.format(
            type(self).__name__, self.line, self.column, self.media)
class ImportRule:
    """A parsed @import rule.

    .. attribute:: at_keyword

        Always ``'@import'``

    .. attribute:: uri

        The URI to be imported, as read from the stylesheet.
        (URIs are not made absolute.)

    .. attribute:: media

        For CSS 2.1 without media queries: the media types as a list of
        strings. Explicitly ``['all']`` if the media was omitted in the
        source.

    """

    at_keyword = '@import'
    __slots__ = 'uri', 'media', 'line', 'column'

    def __init__(self, uri, media, line, column):
        self.uri = uri
        self.media = media
        self.line = line
        self.column = column

    def __repr__(self):
        return '<{} {}:{} {}>'.format(
            type(self).__name__, self.line, self.column, self.uri)
def _remove_at_charset(tokens):
"""Remove any valid @charset at the beginning of a token stream.
:param tokens:
An iterable of tokens
:returns:
A possibly truncated iterable of tokens
"""
tokens = iter(tokens)
header = list(islice(tokens, 4))
if [t.type for t in header] == ['ATKEYWORD', 'S', 'STRING', ';']:
atkw, space, string, semicolon = header
if ((atkw.value, space.value) == ('@charset', ' ')
and string.as_css()[0] == '"'):
# Found a valid @charset rule, only keep what’s after it.
return tokens
return chain(header, tokens)
class CSS21Parser:
"""Parser for CSS 2.1
This parser supports the core CSS syntax as well as @import, @media,
@page and !important.
Note that property values are still not parsed, as UAs using this
parser may only support some properties or some values.
Currently the parser holds no state. It being a class only allows
subclassing and overriding its methods.
"""
def __init__(self):
self.at_parsers = {
'@' + x:getattr(self, 'parse_%s_rule' % x) for x in ('media', 'page', 'import', 'charset')}
# User API:
def parse_stylesheet_file(self, css_file, protocol_encoding=None,
linking_encoding=None, document_encoding=None):
"""Parse a stylesheet from a file or filename.
Character encoding-related parameters and behavior are the same
as in :meth:`parse_stylesheet_bytes`.
:param css_file:
Either a file (any object with a :meth:`~file.read` method)
or a filename.
:return:
A :class:`Stylesheet`.
"""
if hasattr(css_file, 'read'):
css_bytes = css_file.read()
else:
with open(css_file, 'rb') as fd:
css_bytes = fd.read()
return self.parse_stylesheet_bytes(css_bytes, protocol_encoding,
linking_encoding, document_encoding)
def parse_stylesheet_bytes(self, css_bytes, protocol_encoding=None,
linking_encoding=None, document_encoding=None):
"""Parse a stylesheet from a byte string.
The character encoding is determined from the passed metadata and the
``@charset`` rule in the stylesheet (if any).
If no encoding information is available or decoding fails,
decoding defaults to UTF-8 and then fall back on ISO-8859-1.
:param css_bytes:
A CSS stylesheet as a byte string.
:param protocol_encoding:
The "charset" parameter of a "Content-Type" HTTP header (if any),
or similar metadata for other protocols.
:param linking_encoding:
``<link charset="">`` or other metadata from the linking mechanism
(if any)
:param document_encoding:
Encoding of the referring style sheet or document (if any)
:return:
A :class:`Stylesheet`.
"""
css_unicode, encoding = decode(css_bytes, protocol_encoding,
linking_encoding, document_encoding)
return self.parse_stylesheet(css_unicode, encoding=encoding)
def parse_stylesheet(self, css_unicode, encoding=None):
"""Parse a stylesheet from an Unicode string.
:param css_unicode:
A CSS stylesheet as an unicode string.
:param encoding:
The character encoding used to decode the stylesheet from bytes,
if any.
:return:
A :class:`Stylesheet`.
"""
tokens = tokenize_grouped(css_unicode)
if encoding:
tokens = _remove_at_charset(tokens)
rules, errors = self.parse_rules(tokens, context='stylesheet')
return Stylesheet(rules, errors, encoding)
def parse_style_attr(self, css_source):
"""Parse a "style" attribute (eg. of an HTML element).
This method only accepts Unicode as the source (HTML) document
is supposed to handle the character encoding.
:param css_source:
The attribute value, as an unicode string.
:return:
A tuple of the list of valid :class:`Declaration` and
a list of :class:`~.parsing.ParseError`.
"""
return self.parse_declaration_list(tokenize_grouped(css_source))
# API for subclasses:
def parse_rules(self, tokens, context):
"""Parse a sequence of rules (rulesets and at-rules).
:param tokens:
An iterable of tokens.
:param context:
Either ``'stylesheet'`` or an at-keyword such as ``'@media'``.
(Most at-rules are only allowed in some contexts.)
:return:
A tuple of a list of parsed rules and a list of
:class:`~.parsing.ParseError`.
"""
rules = []
errors = []
tokens = iter(tokens)
for token in tokens:
if token.type not in ('S', 'CDO', 'CDC'):
try:
if token.type == 'ATKEYWORD':
rule = self.read_at_rule(token, tokens)
result = self.parse_at_rule(
rule, rules, errors, context)
rules.append(result)
else:
rule, rule_errors = self.parse_ruleset(token, tokens)
rules.append(rule)
errors.extend(rule_errors)
except ParseError as exc:
errors.append(exc)
# Skip the entire rule
return rules, errors
    def read_at_rule(self, at_keyword_token, tokens):
        """Read an at-rule from a token stream.

        :param at_keyword_token:
            The ATKEYWORD token that starts this at-rule
            You may have read it already to distinguish the rule
            from a ruleset.
        :param tokens:
            An iterator of subsequent tokens. Will be consumed just enough
            for one at-rule.
        :return:
            An unparsed :class:`AtRule`.
        :raises:
            :class:`~.parsing.ParseError` if the head is invalid for the core
            grammar. The body is **not** validated. See :class:`AtRule`.
        """
        # CSS syntax is case-insensitive
        at_keyword = at_keyword_token.value.lower()
        head = []
        # For the ParseError in case `tokens` is empty:
        token = at_keyword_token
        for token in tokens:
            # Single-character token types: '{' starts the body, ';'
            # ends a body-less at-rule. Membership in the string '{;'
            # tests for either.
            if token.type in '{;':
                break
            # Everything up to the terminator belongs to the head
            # (surrounding white space is stripped below).
            else:
                head.append(token)
        # On unexpected end of stylesheet, pretend that a ';' was there
        head = strip_whitespace(head)
        for head_token in head:
            validate_any(head_token, 'at-rule head')
        # `token` is the terminator (or the at-keyword token itself if
        # the stream ended): only a '{' carries a body.
        body = token.content if token.type == '{' else None
        return AtRule(at_keyword, head, body,
                      at_keyword_token.line, at_keyword_token.column)
def parse_at_rule(self, rule, previous_rules, errors, context):
    """Dispatch an unparsed at-rule to the handler for its at-keyword.

    Subclasses that override this method must use ``super()`` and
    pass its return value for at-rules they do not know.

    In CSS 2.1, this method handles @charset, @import, @media and
    @page rules.

    :param rule:
        An unparsed :class:`AtRule`.
    :param previous_rules:
        The at-rules and rulesets parsed so far in this context;
        handlers may inspect them (eg. @import is only allowed before
        anything but a @charset rule).
    :param context:
        Either ``'stylesheet'`` or an at-keyword such as ``'@media'``.
    :raises:
        :class:`~.parsing.ParseError` if the rule is invalid.
    :return:
        A parsed at-rule.
    """
    handler = self.at_parsers.get(rule.at_keyword)
    if handler is None:
        raise ParseError(rule, 'unknown at-rule in {0} context: {1}'
                         .format(context, rule.at_keyword))
    return handler(rule, previous_rules, errors, context)
def parse_page_rule(self, rule, previous_rules, errors, context):
    """Parse an @page rule (CSS 2.1).

    :raises:
        :class:`~.parsing.ParseError` when the rule is misplaced, has an
        invalid selector, or is missing its declaration block.
    :return:
        A :class:`PageRule`.
    """
    if context != 'stylesheet':
        raise ParseError(rule, '@page rule not allowed in ' + context)
    # Validate the selector before complaining about a missing block,
    # so selector errors take precedence.
    selector, specificity = self.parse_page_selector(rule.head)
    if rule.body is None:
        raise ParseError(
            rule, 'invalid {0} rule: missing block'.format(rule.at_keyword))
    declarations, at_rules, body_errors = (
        self.parse_declarations_and_at_rules(rule.body, '@page'))
    errors.extend(body_errors)
    return PageRule(selector, specificity, declarations, at_rules,
                    rule.line, rule.column)
def parse_media_rule(self, rule, previous_rules, errors, context):
    """Parse an @media rule (CSS 2.1).

    :raises:
        :class:`~.parsing.ParseError` when the rule is misplaced, has an
        invalid media list, or is missing its block.
    :return:
        A :class:`MediaRule`.
    """
    if context != 'stylesheet':
        raise ParseError(rule, '@media rule not allowed in ' + context)
    media = self.parse_media(rule.head, errors)
    if rule.body is None:
        raise ParseError(
            rule, 'invalid {0} rule: missing block'.format(rule.at_keyword))
    body_rules, body_errors = self.parse_rules(rule.body, '@media')
    errors.extend(body_errors)
    return MediaRule(media, body_rules, rule.line, rule.column)
def parse_import_rule(self, rule, previous_rules, errors, context):
    """Parse an @import rule (CSS 2.1).

    @import is only valid at the top of a stylesheet, preceded at most
    by an optional @charset and other @import rules.

    :return:
        An :class:`ImportRule`.
    """
    if context != 'stylesheet':
        raise ParseError(rule,
            '@import rule not allowed in ' + context)
    for previous_rule in previous_rules:
        if previous_rule.at_keyword in ('@charset', '@import'):
            continue
        if previous_rule.at_keyword:
            type_ = 'an {0} rule'.format(previous_rule.at_keyword)
        else:
            type_ = 'a ruleset'
        raise ParseError(previous_rule,
            '@import rule not allowed after ' + type_)
    head = rule.head
    if not head:
        raise ParseError(rule,
            'expected URI or STRING for @import rule')
    if head[0].type not in ('URI', 'STRING'):
        raise ParseError(rule,
            'expected URI or STRING for @import rule, got ' + head[0].type)
    uri = head[0].value
    media = self.parse_media(strip_whitespace(head[1:]), errors)
    if rule.body is not None:
        # The position of the ';' token would be best, but we don't
        # have it anymore here.
        raise ParseError(head[-1], "expected ';', got a block")
    return ImportRule(uri, media, rule.line, rule.column)
def parse_charset_rule(self, rule, previous_rules, errors, context):
    """Reject any @charset rule that reaches the parser.

    Always raises :class:`~.parsing.ParseError`. Presumably a valid
    @charset is consumed before parsing proper — TODO confirm; any
    @charset seen here is treated as mis-placed or malformed.
    """
    raise ParseError(rule, 'mis-placed or malformed @charset rule')
def parse_media(self, tokens, errors):
    """For CSS 2.1, parse a comma-separated list of media types.

    Media Queries are expected to override this.

    :param tokens:
        A list of tokens.
    :raises:
        :class:`~.parsing.ParseError` on invalid media types/queries.
    :returns:
        For CSS 2.1, a list of media types as strings; ``['all']`` when
        the list is empty.
    """
    if not tokens:
        return ['all']
    media_types = []
    for part in split_on_comma(remove_whitespace(tokens)):
        part_types = [tok.type for tok in part]
        if part_types != ['IDENT']:
            raise ParseError(tokens[0], 'expected a media type'
                + ((', got ' + ', '.join(part_types)) if part_types else ''))
        media_types.append(part[0].value)
    return media_types
def parse_page_selector(self, tokens):
    """Parse an @page selector (CSS 2.1).

    :param tokens:
        An iterable of tokens, typically from the ``head`` attribute of
        an unparsed :class:`AtRule`.
    :returns:
        A ``(pseudo_class, specificity)`` pair. For CSS 2.1 the
        pseudo-class is ``'first'``, ``'left'``, ``'right'`` or ``None``.
    :raises:
        :class:`~.parsing.ParseError` on invalid selectors.
    """
    if not tokens:
        return None, (0, 0)
    is_pseudo = (len(tokens) == 2 and tokens[0].type == ':'
                 and tokens[1].type == 'IDENT')
    if is_pseudo:
        specificities = {
            'first': (1, 0),
            'left': (0, 1),
            'right': (0, 1),
        }
        pseudo_class = tokens[1].value
        if pseudo_class in specificities:
            return pseudo_class, specificities[pseudo_class]
    raise ParseError(tokens[0], 'invalid @page selector')
def parse_declarations_and_at_rules(self, tokens, context):
    """Parse a mixed list of declarations and at rules, as found eg.
    in the body of an @page rule.

    Note that to add supported at-rules inside @page,
    :class:`~.page3.CSSPage3Parser` extends :meth:`parse_at_rule`,
    not this method.

    :param tokens:
        An iterable of token, typically from the ``body`` attribute of
        an unparsed :class:`AtRule`.
    :param context:
        An at-keyword such as ``'@page'``.
        (Most at-rules are only allowed in some contexts.)
    :returns:
        A tuple of:

        * A list of :class:`Declaration`
        * A list of parsed at-rules (empty for CSS 2.1)
        * A list of :class:`~.parsing.ParseError`
    """
    at_rules = []
    declarations = []
    errors = []
    tokens = iter(tokens)
    for token in tokens:
        if token.type == 'ATKEYWORD':
            try:
                # read_at_rule consumes from the shared iterator up to
                # the end of the at-rule; the outer loop resumes after it.
                rule = self.read_at_rule(token, tokens)
                result = self.parse_at_rule(
                    rule, at_rules, errors, context)
                at_rules.append(result)
            except ParseError as err:
                errors.append(err)
        elif token.type != 'S':
            declaration_tokens = []
            # Collect tokens up to the next ';'; next() yields None at
            # end of input, which also terminates the while loop.
            while token and token.type != ';':
                declaration_tokens.append(token)
                token = next(tokens, None)
            if declaration_tokens:
                try:
                    declarations.append(
                        self.parse_declaration(declaration_tokens))
                except ParseError as err:
                    # Error recovery: skip this declaration only.
                    errors.append(err)
    return declarations, at_rules, errors
def parse_ruleset(self, first_token, tokens):
    """Parse a ruleset: a selector followed by a declaration block.

    :param first_token:
        The first token of the ruleset (probably of the selector).
        You may have read it already to distinguish the rule
        from an at-rule.
    :param tokens:
        An iterator of subsequent tokens. Consumed just enough
        for one ruleset.
    :return:
        A tuple of a :class:`RuleSet` and a list of recovered
        :class:`~.parsing.ParseError` from the declarations.
        (Parsing continues from the next declaration on such errors.)
    :raises:
        :class:`~.parsing.ParseError` if the selector is invalid for the
        core grammar.
        Note that a selector can be valid for the core grammar but
        not for CSS 2.1 or another level.
    """
    selector_tokens = []
    for token in chain([first_token], tokens):
        if token.type != '{':
            selector_tokens.append(token)
            continue
        # The whole selector has been read: validate it, then parse the
        # declaration block that follows.
        selector_tokens = strip_whitespace(selector_tokens)
        if not selector_tokens:
            raise ParseError(first_token, 'empty selector')
        for selector_token in selector_tokens:
            validate_any(selector_token, 'selector')
        declarations, recovered_errors = self.parse_declaration_list(
            token.content)
        ruleset = RuleSet(selector_tokens, declarations,
                          first_token.line, first_token.column)
        return ruleset, recovered_errors
    raise ParseError(token, 'no declaration block found for ruleset')
def parse_declaration_list(self, tokens):
    """Parse a ``;``-separated declaration list.

    You may want to use :meth:`parse_declarations_and_at_rules` (or
    some other method that uses :func:`parse_declaration` directly)
    instead if the context contains more than just declarations.

    :param tokens:
        An iterable of tokens. Should stop at (before) the end
        of the block, as marked by ``}``.
    :return:
        A tuple of the list of valid :class:`Declaration` and a list
        of :class:`~.parsing.ParseError`.
    """
    # Group tokens between ';' separators.
    groups = [[]]
    for token in tokens:
        if token.type == ';':
            groups.append([])
        else:
            groups[-1].append(token)
    declarations = []
    errors = []
    for group in groups:
        group = strip_whitespace(group)
        if not group:
            continue  # consecutive ';' produce empty groups: skip them
        try:
            declarations.append(self.parse_declaration(group))
        except ParseError as exc:
            # Error recovery: skip this declaration only.
            errors.append(exc)
    return declarations, errors
def parse_declaration(self, tokens):
    """Parse a single declaration.

    :param tokens:
        An iterable of at least one token. Should stop at (before)
        the end of the declaration, as marked by a ``;`` or ``}``.
        Empty declarations (ie. consecutive ``;`` with only white space
        in-between) should be skipped earlier and not passed to
        this method.
    :returns:
        A :class:`Declaration`.
    :raises:
        :class:`~.parsing.ParseError` if the tokens do not match the
        'declaration' production of the core grammar.
    """
    tokens = iter(tokens)
    name_token = next(tokens)  # assume there is at least one
    if name_token.type != 'IDENT':
        raise ParseError(name_token,
            'expected a property name, got {0}'.format(name_token.type))
    # Property names are case-insensitive in CSS.
    property_name = name_token.value.lower()
    token = name_token  # in case ``tokens`` is now empty
    found_colon = False
    for token in tokens:
        if token.type == ':':
            found_colon = True
            break
        if token.type != 'S':
            raise ParseError(
                token, "expected ':', got {0}".format(token.type))
    if not found_colon:
        raise ParseError(token, "expected ':'")
    value = strip_whitespace(list(tokens))
    if not value:
        raise ParseError(token, 'expected a property value')
    validate_value(value)
    value, priority = self.parse_value_priority(value)
    return Declaration(
        property_name, value, priority, name_token.line, name_token.column)
def parse_value_priority(self, tokens):
    """Separate a trailing ``!important`` marker from a property value.

    :param tokens:
        A list of tokens for the property value.
    :returns:
        A tuple of the actual property value (a list of tokens)
        and the :attr:`~Declaration.priority` (``'important'`` or
        ``None``).
    :raises:
        :class:`~.parsing.ParseError` if ``!important`` is the whole
        value.
    """
    remaining = list(tokens)
    # Walk the token list from the end.
    last = remaining.pop()
    if last.type == 'IDENT' and last.value.lower() == 'important':
        while remaining:
            candidate = remaining.pop()
            if candidate.type == 'S':
                # White space between '!' and 'important': keep looking.
                continue
            if not (candidate.type == 'DELIM' and candidate.value == '!'):
                break
            # Drop any white space between the value and the '!'.
            while remaining and remaining[-1].type == 'S':
                remaining.pop()
            if not remaining:
                raise ParseError(
                    candidate, 'expected a value before !important')
            return remaining, 'important'
    return tokens, None
| 29,393 | Python | .py | 665 | 33.403008 | 121 | 0.57423 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,844 | __init__.py | kovidgoyal_calibre/src/tinycss/__init__.py | # coding: utf8
"""
tinycss
-------
A CSS parser, and nothing else.
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
from .version import VERSION
__version__ = VERSION
from tinycss.css21 import CSS21Parser
from tinycss.fonts3 import CSSFonts3Parser
from tinycss.media3 import CSSMedia3Parser
from tinycss.page3 import CSSPage3Parser
# Feature-name shorthands accepted by make_parser(): each maps to the
# parser mixin class that implements that CSS 3 module.
PARSER_MODULES = {
    'page3': CSSPage3Parser,
    'fonts3': CSSFonts3Parser,
    'media3': CSSMedia3Parser,
}
def make_parser(*features, **kwargs):
    """Make a parser object with the chosen features.

    :param features:
        Positional arguments are base classes the new parser class will
        extend. The strings ``'page3'``, ``'fonts3'`` and ``'media3'``
        are accepted as shorthand for the corresponding parser classes.
    :param kwargs:
        Keyword arguments are passed to the parser's constructor.
    :returns:
        An instance of a new subclass of :class:`CSS21Parser`.
    """
    if not features:
        return CSS21Parser(**kwargs)
    bases = tuple(PARSER_MODULES.get(feature, feature)
                  for feature in features)
    parser_class = type('CustomCSSParser', bases + (CSS21Parser,), {})
    return parser_class(**kwargs)
def make_full_parser(**kwargs):
    """Make a parser that supports every CSS 3 module listed in
    ``PARSER_MODULES``, in addition to CSS 2.1."""
    return make_parser(*PARSER_MODULES, **kwargs)
| 1,407 | Python | .py | 40 | 30.375 | 83 | 0.698155 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,845 | fonts3.py | kovidgoyal_calibre/src/tinycss/fonts3.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from tinycss.css21 import CSS21Parser, ParseError
from .tokenizer import tokenize_grouped
def parse_font_family_tokens(tokens):
    """Extract a list of font family names from a token stream.

    IDENT tokens accumulate into a single space-separated name; a comma
    DELIM or a STRING token ends the current name. Other token types
    (eg. white space) are ignored.

    :param tokens: An iterable of tokens.
    :returns: A list of family names as stripped strings.
    """
    families = []
    pending = ''
    for tok in tokens:
        if tok.type == 'STRING':
            # A quoted name: flush whatever was accumulating first.
            name = pending.strip()
            if name:
                families.append(name)
            pending = tok.value
        elif tok.type == 'DELIM' and tok.value == ',':
            name = pending.strip()
            if name:
                families.append(name)
            pending = ''
        elif tok.type == 'IDENT':
            pending += ' ' + tok.value
    name = pending.strip()
    if name:
        families.append(name)
    return families
def parse_font_family(css_string):
    """Parse the value of a CSS ``font-family`` property.

    :param css_string: The property value as a string.
    :returns: A list of font family names (strings).
    """
    # str() replaces the obsolete py2/3 compat idiom type('')(...),
    # which is just an obfuscated spelling of the same call.
    return parse_font_family_tokens(tokenize_grouped(str(css_string).strip()))
def serialize_single_font_family(x):
    """Serialize one font family name for use in a CSS value.

    Generic families are emitted lowercase and unquoted (normalizing
    'sansserif' to 'sans-serif'); simple identifier-like names are left
    bare; anything else is double-quoted with embedded quotes escaped.
    """
    lowered = x.lower()
    if lowered in GENERIC_FAMILIES:
        return 'sans-serif' if lowered == 'sansserif' else lowered
    # css_parser dies if a font name starts with "and", so quote those.
    if not lowered.startswith('and') and SIMPLE_NAME_PAT.match(x) is not None:
        return x
    return '"%s"' % x.replace('"', r'\"')
def serialize_font_family(families):
    """Serialize a list of font family names as one CSS value."""
    return ', '.join(serialize_single_font_family(family)
                     for family in families)
# Keyword sets used by parse_font() when walking the `font` shorthand.
GLOBAL_IDENTS = frozenset('inherit initial unset normal'.split())
STYLE_IDENTS = frozenset('italic oblique'.split())
VARIANT_IDENTS = frozenset(('small-caps',))
WEIGHT_IDENTS = frozenset('bold bolder lighter'.split())
STRETCH_IDENTS = frozenset('ultra-condensed extra-condensed condensed semi-condensed semi-expanded expanded extra-expanded ultra-expanded'.split())
# Identifiers that may appear before the font-size in the shorthand.
BEFORE_SIZE_IDENTS = STYLE_IDENTS | VARIANT_IDENTS | WEIGHT_IDENTS | STRETCH_IDENTS
SIZE_IDENTS = frozenset('xx-small x-small small medium large x-large xx-large larger smaller'.split())
# Numeric font-weight values (100..900).
WEIGHT_SIZES = frozenset(map(int, '100 200 300 400 500 600 700 800 900'.split()))
# System-font keywords from CSS 2.1 (eg. `font: menu`).
LEGACY_FONT_SPEC = frozenset('caption icon menu message-box small-caption status-bar'.split())
GENERIC_FAMILIES = frozenset('serif sans-serif sansserif cursive fantasy monospace'.split())
# Font family names matching this pattern need no quoting when serialized.
SIMPLE_NAME_PAT = re.compile(r'[a-zA-Z][a-zA-Z0-9_-]*$')
def serialize_font(font_dict):
    """Serialize a parsed `font` shorthand dict (as produced by
    :func:`parse_font`) back into a CSS value string.

    :param font_dict:
        A dict with any of the keys ``font-style``, ``font-variant``,
        ``font-weight``, ``font-stretch``, ``font-size``,
        ``line-height`` and ``font-family``.
    :returns: The shorthand value as a string.
    """
    parts = []
    for prop in ('font-style', 'font-variant', 'font-weight', 'font-stretch'):
        keyword = font_dict.get(prop)
        if keyword is not None:
            parts.append(keyword)
    size = font_dict.get('font-size')
    if size is not None:
        line_height = font_dict.get('line-height')
        # line-height is only valid attached to the size as size/height.
        parts.append(size if line_height is None else size + '/' + line_height)
    families = font_dict.get('font-family')
    if families:
        parts.append(serialize_font_family(families))
    return ' '.join(parts)
def parse_font(css_string):
    """Parse the CSS `font` shorthand property into its longhand parts.

    Tokens are consumed left to right by a small state machine: optional
    style/variant/weight/stretch keywords, then the font-size
    (optionally followed by /line-height), then the font family list.

    :param css_string: The shorthand value as a string.
    :returns:
        A dict with any of the keys ``font-style``, ``font-variant``,
        ``font-weight``, ``font-stretch``, ``font-size``,
        ``line-height`` and ``font-family`` (a list of strings),
        depending on what was present and recognized in the input.
    """
    # See https://www.w3.org/TR/css-fonts-3/#font-prop
    style = variant = weight = stretch = size = height = None
    # Reversed so tokens.pop() yields tokens in source order.
    tokens = list(reversed(tuple(tokenize_grouped(type('')(css_string).strip()))))
    if tokens and tokens[-1].value in LEGACY_FONT_SPEC:
        # System fonts (caption, icon, ...) cannot be resolved here:
        # fall back to a generic family.
        return {'font-family':['sans-serif']}
    while tokens:
        tok = tokens.pop()
        if tok.type == 'STRING':
            # A quoted string starts the font-family list: push it back
            # and stop scanning.
            tokens.append(tok)
            break
        if tok.type == 'INTEGER':
            if size is None:
                # A bare 100..900 integer before the size is a weight.
                if weight is None and tok.value in WEIGHT_SIZES:
                    weight = tok.as_css()
                    continue
                break
            if height is None:
                # Integer right after size/: a unitless line-height.
                height = tok.as_css()
                break
            break
        if tok.type == 'NUMBER':
            # A plain number is only meaningful as a unitless
            # line-height after the size.
            if size is not None and height is None:
                height = tok.as_css()
                break
        if tok.type == 'DELIM':
            # Only the '/' between font-size and line-height is valid.
            if tok.value == '/' and size is not None and height is None:
                continue
            break
        if tok.type in ('DIMENSION', 'PERCENTAGE'):
            if size is None:
                size = tok.as_css()
                continue
            if height is None:
                height = tok.as_css()
                break
        if tok.type == 'IDENT':
            if tok.value in GLOBAL_IDENTS:
                # Global keywords (inherit/initial/unset/normal) fill
                # the first still-empty slot, in declaration order.
                if size is not None:
                    if height is None:
                        height = tok.value
                    else:
                        tokens.append(tok)
                    break
                if style is None:
                    style = tok.value
                elif variant is None:
                    variant = tok.value
                elif weight is None:
                    weight = tok.value
                elif stretch is None:
                    stretch = tok.value
                elif size is None:
                    size = tok.value
                elif height is None:
                    height = tok.value
                    break
                else:
                    tokens.append(tok)
                    break
                continue
            if tok.value in BEFORE_SIZE_IDENTS:
                if size is not None:
                    break
                if tok.value in STYLE_IDENTS:
                    style = tok.value
                elif tok.value in VARIANT_IDENTS:
                    variant = tok.value
                elif tok.value in WEIGHT_IDENTS:
                    weight = tok.value
                elif tok.value in STRETCH_IDENTS:
                    stretch = tok.value
            elif tok.value in SIZE_IDENTS:
                size = tok.value
            else:
                # Anything else starts the font-family list.
                tokens.append(tok)
                break
    # Whatever tokens remain (re-reversed into source order) are the
    # font family list.
    families = parse_font_family_tokens(reversed(tokens))
    ans = {}
    if style is not None:
        ans['font-style'] = style
    if variant is not None:
        ans['font-variant'] = variant
    if weight is not None:
        ans['font-weight'] = weight
    if stretch is not None:
        ans['font-stretch'] = stretch
    if size is not None:
        ans['font-size'] = size
    if height is not None:
        ans['line-height'] = height
    if families:
        ans['font-family'] = families
    return ans
class FontFaceRule:
    """An unvalidated, parsed ``@font-face`` rule.

    .. attribute:: declarations
        A list of declaration objects from the rule body.
    .. attribute:: line
        Source line where this was read.
    .. attribute:: column
        Source column where this was read.
    """
    at_keyword = '@font-face'
    __slots__ = 'declarations', 'line', 'column'

    def __init__(self, declarations, line, column):
        self.declarations = declarations
        self.line = line
        self.column = column

    def __repr__(self):
        return '<{} at {}:{}>'.format(
            type(self).__name__, self.line, self.column)
class CSSFonts3Parser(CSS21Parser):
    """Extend :class:`~.css21.CSS21Parser` with ``@font-face`` rules
    from the CSS 3 Fonts module."""

    # Contexts in which an @font-face rule may legally appear.
    ALLOWED_CONTEXTS_FOR_FONT_FACE = {'stylesheet', '@media', '@page'}

    def __init__(self):
        super(CSSFonts3Parser, self).__init__()
        self.at_parsers['@font-face'] = self.parse_font_face_rule

    def parse_font_face_rule(self, rule, previous_rules, errors, context):
        """Parse one unparsed @font-face at-rule.

        :returns: A :class:`FontFaceRule`.
        :raises: :class:`ParseError` on a misplaced or malformed rule.
        """
        if context not in self.ALLOWED_CONTEXTS_FOR_FONT_FACE:
            raise ParseError(rule,
                '@font-face rule not allowed in ' + context)
        if rule.body is None:
            raise ParseError(rule,
                'invalid {0} rule: missing block'.format(rule.at_keyword))
        if rule.head:
            raise ParseError(rule, '{0} rule is not allowed to have content before the descriptor declaration'.format(rule.at_keyword))
        declarations, declaration_errors = self.parse_declaration_list(rule.body)
        errors.extend(declaration_errors)
        return FontFaceRule(declarations, rule.line, rule.column)
| 7,595 | Python | .py | 193 | 28.834197 | 147 | 0.56697 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,846 | page3.py | kovidgoyal_calibre/src/tinycss/page3.py | # coding: utf8
"""
tinycss.page3
------------------
Support for CSS 3 Paged Media syntax:
http://dev.w3.org/csswg/css3-page/
Adds support for named page selectors and margin rules.
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
from .css21 import CSS21Parser, ParseError
class MarginRule:
    """A parsed at-rule for a page margin box.

    .. attribute:: at_keyword

        One of the 16 margin-box keywords from the Paged Media module,
        such as ``@top-left``, ``@top-center``, ``@bottom-right-corner``,
        ``@left-middle`` or ``@right-bottom``.

    .. attribute:: declarations

        A list of :class:`~.css21.Declaration` objects.

    .. attribute:: line

        Source line where this was read.

    .. attribute:: column

        Source column where this was read.
    """
    __slots__ = 'at_keyword', 'declarations', 'line', 'column'

    def __init__(self, at_keyword, declarations, line, column):
        self.at_keyword = at_keyword
        self.declarations = declarations
        self.line = line
        self.column = column
class CSSPage3Parser(CSS21Parser):
    """Extend :class:`~.css21.CSS21Parser` for `CSS 3 Paged Media`_ syntax.

    .. _CSS 3 Paged Media: http://dev.w3.org/csswg/css3-page/

    Compared to CSS 2.1, :class:`~.css21.PageRule` objects change in two
    ways: ``at_rules`` may contain :class:`MarginRule` objects, and
    ``selector`` becomes a ``(page_name, pseudo_class)`` tuple where
    either element may be ``None``. For example ``@page {}`` parses as
    ``(None, None)``, ``@page :first {}`` as ``(None, 'first')``,
    ``@page chapter {}`` as ``('chapter', None)`` and
    ``@page table:right {}`` as ``('table', 'right')``.
    """

    # The 16 margin-box keywords from the Paged Media module.
    PAGE_MARGIN_AT_KEYWORDS = (
        '@top-left-corner',
        '@top-left',
        '@top-center',
        '@top-right',
        '@top-right-corner',
        '@bottom-left-corner',
        '@bottom-left',
        '@bottom-center',
        '@bottom-right',
        '@bottom-right-corner',
        '@left-top',
        '@left-middle',
        '@left-bottom',
        '@right-top',
        '@right-middle',
        '@right-bottom',
    )

    def __init__(self):
        super(CSSPage3Parser, self).__init__()
        handler = self.parse_page_margin_rule
        for keyword in self.PAGE_MARGIN_AT_KEYWORDS:
            self.at_parsers[keyword] = handler

    def parse_page_margin_rule(self, rule, previous_rules, errors, context):
        """Parse one margin-box at-rule found inside @page.

        :returns: A :class:`MarginRule`.
        :raises: :class:`ParseError` on a misplaced or malformed rule.
        """
        if context != '@page':
            raise ParseError(rule,
                '%s rule not allowed in %s' % (rule.at_keyword, context))
        if rule.head:
            unexpected = rule.head[0]
            raise ParseError(unexpected,
                'unexpected %s token in %s rule header'
                % (unexpected.type, rule.at_keyword))
        declarations, body_errors = self.parse_declaration_list(rule.body)
        errors.extend(body_errors)
        return MarginRule(rule.at_keyword, declarations,
                          rule.line, rule.column)

    def parse_page_selector(self, head):
        """Parse an @page selector.

        :param head:
            The ``head`` attribute of an unparsed :class:`AtRule`.
        :returns:
            A ``((name, pseudo_class), specificity)`` pair. The
            pseudo-class is 'first', 'left', 'right' or None; 'blank'
            is added by GCPM.
        :raises:
            :class:`~.parsing.ParseError` on invalid selectors.
        """
        if not head:
            return (None, None), (0, 0, 0)
        if head[0].type == 'IDENT':
            name = head.pop(0).value
            # Drop white space between the name and the pseudo-class.
            while head and head[0].type == 'S':
                head.pop(0)
            if not head:
                return (name, None), (1, 0, 0)
            name_specificity = (1,)
        else:
            name = None
            name_specificity = (0,)
        if (len(head) == 2 and head[0].type == ':'
                and head[1].type == 'IDENT'):
            pseudo_class = head[1].value
            pseudo_specificity = {
                'first': (1, 0), 'blank': (1, 0),
                'left': (0, 1), 'right': (0, 1),
            }.get(pseudo_class)
            if pseudo_specificity:
                return (name, pseudo_class), (
                    name_specificity + pseudo_specificity)
        raise ParseError(head[0], 'invalid @page selector')
| 5,050 | Python | .py | 130 | 29.869231 | 77 | 0.507673 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,847 | tokenizer.py | kovidgoyal_calibre/src/tinycss/tokenizer.py | # coding: utf8
"""
tinycss.tokenizer
-----------------
Tokenizer for the CSS core syntax:
http://www.w3.org/TR/CSS21/syndata.html#tokenization
This is the pure-python implementation. See also speedups.pyx
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
from tinycss import token_data
def tokenize_flat(css_source, ignore_comments=True,
        # Make these local variable to avoid global lookups in the loop
        tokens_dispatch=token_data.TOKEN_DISPATCH,
        unicode_unescape=token_data.UNICODE_UNESCAPE,
        newline_unescape=token_data.NEWLINE_UNESCAPE,
        simple_unescape=token_data.SIMPLE_UNESCAPE,
        find_newlines=token_data.FIND_NEWLINES,
        Token=token_data.Token,
        len=len,
        int=int,
        float=float,
        list=list,
        _None=None,
        ):
    """
    :param css_source:
        CSS as an unicode string
    :param ignore_comments:
        if true (the default) comments will not be included in the
        return value
    :return:
        An iterator of :class:`Token`

    The remaining parameters are a private micro-optimization: they bind
    module globals and builtins as default arguments so lookups inside
    the loop hit locals. Do not pass them explicitly.
    """
    pos = 0
    line = 1
    column = 1
    source_len = len(css_source)
    tokens = []
    while pos < source_len:
        char = css_source[pos]
        if char in ':;{}()[]':
            # Single-character punctuation: the token type is the char.
            type_ = char
            css_value = char
        else:
            codepoint = min(ord(char), 160)
            for _index, type_, regexp in tokens_dispatch[codepoint]:
                match = regexp(css_source, pos)
                if match is not None:
                    # First match is the longest. See comments on TOKENS above.
                    css_value = match.group()
                    break
            else:
                # No match.
                # "Any other character not matched by the above rules,
                # and neither a single nor a double quote."
                # ... but quotes at the start of a token are always matched
                # by STRING or BAD_STRING. So DELIM is any single character.
                type_ = 'DELIM'
                css_value = char
        length = len(css_value)
        next_pos = pos + length
        # A BAD_COMMENT is a comment at EOF. Ignore it too.
        if not (ignore_comments and type_ in ('COMMENT', 'BAD_COMMENT')):
            # Parse numbers, extract strings and URIs, unescape
            unit = _None
            if type_ == 'DIMENSION':
                value = match.group(1)
                value = float(value) if '.' in value else int(value)
                unit = match.group(2)
                unit = simple_unescape(unit)
                unit = unicode_unescape(unit)
                unit = unit.lower()  # normalize
            elif type_ == 'PERCENTAGE':
                value = css_value[:-1]
                value = float(value) if '.' in value else int(value)
                unit = '%'
            elif type_ == 'NUMBER':
                value = css_value
                if '.' in value:
                    value = float(value)
                else:
                    # A number without a fraction is reported as INTEGER.
                    value = int(value)
                    type_ = 'INTEGER'
            elif type_ in ('IDENT', 'ATKEYWORD', 'HASH', 'FUNCTION'):
                value = simple_unescape(css_value)
                value = unicode_unescape(value)
            elif type_ == 'URI':
                value = match.group(1)
                if value and value[0] in '"\'':
                    value = value[1:-1]  # Remove quotes
                    value = newline_unescape(value)
                value = simple_unescape(value)
                value = unicode_unescape(value)
            elif type_ == 'STRING':
                value = css_value[1:-1]  # Remove quotes
                value = newline_unescape(value)
                value = simple_unescape(value)
                value = unicode_unescape(value)
            # BAD_STRING can only be one of:
            # * Unclosed string at the end of the stylesheet:
            #   Close the string, but this is not an error.
            #   Make it a "good" STRING token.
            # * Unclosed string at the (unescaped) end of the line:
            #   Close the string, but this is an error.
            #   Leave it as a BAD_STRING, don't bother parsing it.
            # See http://www.w3.org/TR/CSS21/syndata.html#parsing-errors
            elif type_ == 'BAD_STRING' and next_pos == source_len:
                type_ = 'STRING'
                value = css_value[1:]  # Remove quote
                value = newline_unescape(value)
                value = simple_unescape(value)
                value = unicode_unescape(value)
            else:
                value = css_value
            tokens.append(Token(type_, css_value, value, unit, line, column))
        pos = next_pos
        # Track line/column for error reporting on later tokens.
        newlines = find_newlines(css_value)
        if newlines:
            line += len(newlines)
            # Add 1 to have lines start at column 1, not 0
            column = length - newlines[-1].end() + 1
        else:
            column += length
    return tokens
def regroup(tokens):
    """
    Match pairs of tokens: () [] {} function()
    (Strings in "" or '' are taken care of by the tokenizer.)

    Opening tokens are replaced by a :class:`ContainerToken`.
    Closing tokens are removed. Unmatched closing tokens are invalid
    but left as-is. All nested structures that are still open at
    the end of the stylesheet are implicitly closed.

    :param tokens:
        a *flat* iterable of tokens, as returned by :func:`tokenize_flat`.
    :return:
        A tree of tokens (lazily, as a generator).
    """
    # "global" objects for the inner recursion
    pairs = {'FUNCTION': ')', '(': ')', '[': ']', '{': '}'}
    tokens = iter(tokens)
    # Mutable flag shared between all recursive generator instances:
    # set when the underlying iterator is exhausted.
    eof = [False]

    def _regroup_inner(stop_at=None,
            tokens=tokens, pairs=pairs, eof=eof,
            ContainerToken=token_data.ContainerToken,
            FunctionToken=token_data.FunctionToken):
        # All recursion levels share the same `tokens` iterator, so a
        # nested call consumes tokens up to its own closing delimiter.
        for token in tokens:
            type_ = token.type
            if type_ == stop_at:
                # Found the closing token for this nesting level.
                return
            end = pairs.get(type_)
            if end is None:
                yield token  # Not a grouping token
            else:
                assert not isinstance(token, ContainerToken), (
                    'Token looks already grouped: {0}'.format(token))
                content = list(_regroup_inner(end))
                if eof[0]:
                    end = ''  # Implicit end of structure at EOF.
                if type_ == 'FUNCTION':
                    yield FunctionToken(token.type, token.as_css(), end,
                                        token.value, content,
                                        token.line, token.column)
                else:
                    yield ContainerToken(token.type, token.as_css(), end,
                                         content,
                                         token.line, token.column)
        else:
            # The for/else runs when the iterator is exhausted without a
            # `return`, ie. at end of file/stylesheet.
            eof[0] = True
    return _regroup_inner()
def tokenize_grouped(css_source, ignore_comments=True):
    """Tokenize ``css_source`` and regroup matched () [] {} pairs.

    :param css_source:
        CSS as an unicode string.
    :param ignore_comments:
        If true (the default) comments will not be included in the
        return value.
    :return:
        An iterator of :class:`Token`.
    """
    flat_tokens = tokenize_flat(css_source, ignore_comments)
    return regroup(flat_tokens)
# Optional Cython version of tokenize_flat
# Make both versions available with explicit names for tests.
python_tokenize_flat = tokenize_flat
try:
    tok = token_data.load_c_tokenizer()
except (ImportError, RuntimeError):
    # No compiled speedup available: keep the pure-Python tokenizer.
    c_tokenize_flat = None
else:
    # Use the c tokenizer by default
    # NOTE(review): this wrapper defaults ignore_comments to False while
    # the pure-Python tokenize_flat defaults it to True — confirm the
    # mismatch is intentional for callers that omit the argument.
    c_tokenize_flat = tokenize_flat = lambda s, ignore_comments=False:tok.tokenize_flat(s, ignore_comments)
| 7,737 | Python | .py | 191 | 29.109948 | 107 | 0.551316 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,848 | color3.py | kovidgoyal_calibre/src/tinycss/tests/color3.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.color3 import hsl_to_rgb, parse_color_string
from tinycss.tests import BaseTest
class TestColor3(BaseTest):
    """Tests for tinycss.color3 (CSS Color Module Level 3)."""

    def test_color_parsing(self):
        """Table-driven check of parse_color_string over keyword, hex,
        rgb(a) and hsl(a) notations, valid and invalid."""
        for css_source, expected_result in [
            ('', None),
            (' /* hey */\n', None),
            ('4', None),
            ('top', None),
            ('/**/transparent', (0, 0, 0, 0)),
            ('transparent', (0, 0, 0, 0)),
            (' transparent\n', (0, 0, 0, 0)),
            ('TransParent', (0, 0, 0, 0)),
            ('currentColor', 'currentColor'),
            ('CURRENTcolor', 'currentColor'),
            ('current_Color', None),
            ('black', (0, 0, 0, 1)),
            ('white', (1, 1, 1, 1)),
            ('fuchsia', (1, 0, 1, 1)),
            ('cyan', (0, 1, 1, 1)),
            ('CyAn', (0, 1, 1, 1)),
            ('darkkhaki', (189 / 255., 183 / 255., 107 / 255., 1)),
            ('#', None),
            ('#f', None),
            ('#ff', None),
            ('#fff', (1, 1, 1, 1)),
            ('#ffg', None),
            ('#ffff', None),
            ('#fffff', None),
            ('#ffffff', (1, 1, 1, 1)),
            ('#fffffg', None),
            ('#fffffff', None),
            ('#ffffffff', None),
            ('#fffffffff', None),
            ('#cba987', (203 / 255., 169 / 255., 135 / 255., 1)),
            ('#CbA987', (203 / 255., 169 / 255., 135 / 255., 1)),
            ('#1122aA', (17 / 255., 34 / 255., 170 / 255., 1)),
            ('#12a', (17 / 255., 34 / 255., 170 / 255., 1)),
            ('rgb(203, 169, 135)', (203 / 255., 169 / 255., 135 / 255., 1)),
            ('RGB(255, 255, 255)', (1, 1, 1, 1)),
            ('rgB(0, 0, 0)', (0, 0, 0, 1)),
            ('rgB(0, 51, 255)', (0, .2, 1, 1)),
            ('rgb(0,51,255)', (0, .2, 1, 1)),
            ('rgb(0\t, 51 ,255)', (0, .2, 1, 1)),
            ('rgb(/* R */0, /* G */51, /* B */255)', (0, .2, 1, 1)),
            ('rgb(-51, 306, 0)', (-.2, 1.2, 0, 1)),  # out of 0..1 is allowed
            ('rgb(42%, 3%, 50%)', (.42, .03, .5, 1)),
            ('RGB(100%, 100%, 100%)', (1, 1, 1, 1)),
            ('rgB(0%, 0%, 0%)', (0, 0, 0, 1)),
            ('rgB(10%, 20%, 30%)', (.1, .2, .3, 1)),
            ('rgb(10%,20%,30%)', (.1, .2, .3, 1)),
            ('rgb(10%\t, 20% ,30%)', (.1, .2, .3, 1)),
            ('rgb(/* R */10%, /* G */20%, /* B */30%)', (.1, .2, .3, 1)),
            ('rgb(-12%, 110%, 1400%)', (-.12, 1.1, 14, 1)),  # out of 0..1 is allowed
            ('rgb(10%, 50%, 0)', None),
            ('rgb(255, 50%, 0%)', None),
            ('rgb(0, 0 0)', None),
            ('rgb(0, 0, 0deg)', None),
            ('rgb(0, 0, light)', None),
            ('rgb()', None),
            ('rgb(0)', None),
            ('rgb(0, 0)', None),
            ('rgb(0, 0, 0, 0)', None),
            ('rgb(0%)', None),
            ('rgb(0%, 0%)', None),
            ('rgb(0%, 0%, 0%, 0%)', None),
            ('rgb(0%, 0%, 0%, 0)', None),
            ('rgba(0, 0, 0, 0)', (0, 0, 0, 0)),
            ('rgba(203, 169, 135, 0.3)', (203 / 255., 169 / 255., 135 / 255., 0.3)),
            ('RGBA(255, 255, 255, 0)', (1, 1, 1, 0)),
            ('rgBA(0, 51, 255, 1)', (0, 0.2, 1, 1)),
            ('rgba(0, 51, 255, 1.1)', (0, 0.2, 1, 1)),
            ('rgba(0, 51, 255, 37)', (0, 0.2, 1, 1)),
            ('rgba(0, 51, 255, 0.42)', (0, 0.2, 1, 0.42)),
            ('rgba(0, 51, 255, 0)', (0, 0.2, 1, 0)),
            ('rgba(0, 51, 255, -0.1)', (0, 0.2, 1, 0)),
            ('rgba(0, 51, 255, -139)', (0, 0.2, 1, 0)),
            ('rgba(42%, 3%, 50%, 0.3)', (.42, .03, .5, 0.3)),
            ('RGBA(100%, 100%, 100%, 0)', (1, 1, 1, 0)),
            ('rgBA(0%, 20%, 100%, 1)', (0, 0.2, 1, 1)),
            ('rgba(0%, 20%, 100%, 1.1)', (0, 0.2, 1, 1)),
            ('rgba(0%, 20%, 100%, 37)', (0, 0.2, 1, 1)),
            ('rgba(0%, 20%, 100%, 0.42)', (0, 0.2, 1, 0.42)),
            ('rgba(0%, 20%, 100%, 0)', (0, 0.2, 1, 0)),
            ('rgba(0%, 20%, 100%, -0.1)', (0, 0.2, 1, 0)),
            ('rgba(0%, 20%, 100%, -139)', (0, 0.2, 1, 0)),
            ('rgba(255, 255, 255, 0%)', None),
            ('rgba(10%, 50%, 0, 1)', None),
            ('rgba(255, 50%, 0%, 1)', None),
            ('rgba(0, 0, 0 0)', None),
            ('rgba(0, 0, 0, 0deg)', None),
            ('rgba(0, 0, 0, light)', None),
            ('rgba()', None),
            ('rgba(0)', None),
            ('rgba(0, 0, 0)', None),
            ('rgba(0, 0, 0, 0, 0)', None),
            ('rgba(0%)', None),
            ('rgba(0%, 0%)', None),
            ('rgba(0%, 0%, 0%)', None),
            ('rgba(0%, 0%, 0%, 0%)', None),
            ('rgba(0%, 0%, 0%, 0%, 0%)', None),
            ('HSL(0, 0%, 0%)', (0, 0, 0, 1)),
            ('hsL(0, 100%, 50%)', (1, 0, 0, 1)),
            ('hsl(60, 100%, 37.5%)', (0.75, 0.75, 0, 1)),
            ('hsl(780, 100%, 37.5%)', (0.75, 0.75, 0, 1)),
            ('hsl(-300, 100%, 37.5%)', (0.75, 0.75, 0, 1)),
            ('hsl(300, 50%, 50%)', (0.75, 0.25, 0.75, 1)),
            ('hsl(10, 50%, 0)', None),
            ('hsl(50%, 50%, 0%)', None),
            ('hsl(0, 0% 0%)', None),
            ('hsl(30deg, 100%, 100%)', None),
            ('hsl(0, 0%, light)', None),
            ('hsl()', None),
            ('hsl(0)', None),
            ('hsl(0, 0%)', None),
            ('hsl(0, 0%, 0%, 0%)', None),
            ('HSLA(-300, 100%, 37.5%, 1)', (0.75, 0.75, 0, 1)),
            ('hsLA(-300, 100%, 37.5%, 12)', (0.75, 0.75, 0, 1)),
            ('hsla(-300, 100%, 37.5%, 0.2)', (0.75, 0.75, 0, .2)),
            ('hsla(-300, 100%, 37.5%, 0)', (0.75, 0.75, 0, 0)),
            ('hsla(-300, 100%, 37.5%, -3)', (0.75, 0.75, 0, 0)),
            ('hsla(10, 50%, 0, 1)', None),
            ('hsla(50%, 50%, 0%, 1)', None),
            ('hsla(0, 0% 0%, 1)', None),
            ('hsla(30deg, 100%, 100%, 1)', None),
            ('hsla(0, 0%, light, 1)', None),
            ('hsla()', None),
            ('hsla(0)', None),
            ('hsla(0, 0%)', None),
            ('hsla(0, 0%, 0%, 50%)', None),
            ('hsla(0, 0%, 0%, 1, 0%)', None),
            ('cmyk(0, 0, 0, 0)', None),
        ]:
            result = parse_color_string(css_source)
            if isinstance(result, tuple):
                for got, expected in zip(result, expected_result):
                    # Compensate for floating point errors:
                    self.assertLess(abs(got - expected), 1e-10)
                # The named attributes must mirror the tuple elements.
                for i, attr in enumerate(['red', 'green', 'blue', 'alpha']):
                    self.ae(getattr(result, attr), result[i])
            else:
                self.ae(result, expected_result)

    def test_hsl(self):
        """Check hsl_to_rgb against reference conversions."""
        for hsl, expected_rgb in [
            # http://en.wikipedia.org/wiki/HSL_and_HSV#Examples
            ((0, 0, 100), (1, 1, 1)),
            ((127, 0, 100), (1, 1, 1)),
            ((0, 0, 50), (0.5, 0.5, 0.5)),
            ((127, 0, 50), (0.5, 0.5, 0.5)),
            ((0, 0, 0), (0, 0, 0)),
            ((127, 0, 0), (0, 0, 0)),
            ((0, 100, 50), (1, 0, 0)),
            ((60, 100, 37.5), (0.75, 0.75, 0)),
            ((780, 100, 37.5), (0.75, 0.75, 0)),
            ((-300, 100, 37.5), (0.75, 0.75, 0)),
            ((120, 100, 25), (0, 0.5, 0)),
            ((180, 100, 75), (0.5, 1, 1)),
            ((240, 100, 75), (0.5, 0.5, 1)),
            ((300, 50, 50), (0.75, 0.25, 0.75)),
            ((61.8, 63.8, 39.3), (0.628, 0.643, 0.142)),
            ((251.1, 83.2, 51.1), (0.255, 0.104, 0.918)),
            ((134.9, 70.7, 39.6), (0.116, 0.675, 0.255)),
            ((49.5, 89.3, 49.7), (0.941, 0.785, 0.053)),
            ((283.7, 77.5, 54.2), (0.704, 0.187, 0.897)),
            ((14.3, 81.7, 62.4), (0.931, 0.463, 0.316)),
            ((56.9, 99.1, 76.5), (0.998, 0.974, 0.532)),
            ((162.4, 77.9, 44.7), (0.099, 0.795, 0.591)),
            ((248.3, 60.1, 37.3), (0.211, 0.149, 0.597)),
            ((240.5, 29, 60.7), (0.495, 0.493, 0.721)),
        ]:
            for got, expected in zip(hsl_to_rgb(*hsl), expected_rgb):
                # Compensate for floating point errors and Wikipedia's rounding:
                self.assertLess(abs(got - expected), 0.001)
| 8,384 | Python | .py | 177 | 34.655367 | 85 | 0.352065 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,849 | media3.py | kovidgoyal_calibre/src/tinycss/tests/media3.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.media3 import CSSMedia3Parser
from tinycss.media3 import MediaQuery as MQ
from tinycss.tests import BaseTest, jsonify
def jsonify_expr(e):
    """Return the JSON-ified form of a single token, passing None through."""
    return None if e is None else next(jsonify([e]))
def jsonify_expressions(mqlist):
    """JSON-ify the expression tokens of every media query, in place.

    Returns the (mutated) list so it can be compared directly in tests.
    """
    for media_query in mqlist:
        media_query.expressions = tuple(
            (name, jsonify_expr(expr))
            for name, expr in media_query.expressions)
    return mqlist
class TestFonts3(BaseTest):
    # NOTE(review): the class name says "Fonts3" but this class exercises the
    # CSS 3 media-queries parser (CSSMedia3Parser) — looks copy-pasted from
    # the fonts3 tests; confirm whether it should be renamed.

    def test_media_queries(self):
        'Test parsing of media queries from the CSS 3 media module'
        for css, media_query_list, expected_errors in [
            # CSS 2.1 (simple media queries)
            ('@media {}', [MQ()], []),
            ('@media all {}', [MQ()], []),
            ('@media screen {}', [MQ('screen')], []),
            ('@media , screen {}', [MQ(), MQ('screen')], []),
            ('@media screen, {}', [MQ('screen'), MQ()], []),
            # Examples from the CSS 3 specs
            ('@media screen and (color) {}', [MQ('screen', (('color', None),))], []),
            ('@media all and (min-width:500px) {}', [
                MQ('all', (('min-width', ('DIMENSION', 500)),))], []),
            ('@media (min-width:500px) {}', [
                MQ('all', (('min-width', ('DIMENSION', 500)),))], []),
            ('@media (orientation: portrait) {}', [
                MQ('all', (('orientation', ('IDENT', 'portrait')),))], []),
            ('@media screen and (color), projection and (color) {}', [
                MQ('screen', (('color', None),)), MQ('projection', (('color', None),)),], []),
            ('@media not screen and (color) {}', [
                MQ('screen', (('color', None),), True)], []),
            ('@media only screen and (color) {}', [
                MQ('screen', (('color', None),))], []),
            ('@media aural and (device-aspect-ratio: 16/9) {}', [
                MQ('aural', (('device-aspect-ratio', ('RATIO', (16, 9))),))], []),
            ('@media (resolution: 166dpi) {}', [
                MQ('all', (('resolution', ('DIMENSION', 166)),))], []),
            ('@media (min-resolution: 166DPCM) {}', [
                MQ('all', (('min-resolution', ('DIMENSION', 166)),))], []),
            # Malformed media queries
            ('@media (example, all,), speech {}', [MQ(negated=True), MQ('speech')], ['expected a :']),
            ('@media &test, screen {}', [MQ(negated=True), MQ('screen')], ['expected a media expression not a DELIM']),
        ]:
            stylesheet = CSSMedia3Parser().parse_stylesheet(css)
            self.assert_errors(stylesheet.errors, expected_errors)
            # Each source contains exactly one @media rule
            self.ae(len(stylesheet.rules), 1)
            rule = stylesheet.rules[0]
            self.ae(jsonify_expressions(rule.media), media_query_list)
| 2,983 | Python | .py | 55 | 41.527273 | 123 | 0.493484 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,850 | decoding.py | kovidgoyal_calibre/src/tinycss/tests/decoding.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.decoding import decode
from tinycss.tests import BaseTest
def params(css, encoding, use_bom=False, expect_error=False, **kwargs):
    """Pack the positional test parameters plus keyword options into one tuple."""
    packed = (css, encoding, use_bom, expect_error, kwargs)
    return packed
class TestDecoding(BaseTest):
    """Exercise tinycss.decoding.decode() encoding-detection precedence."""

    def test_decoding(self):
        """Each case encodes `css` with `encoding`, then checks whether
        decode() recovers the original text given the hints in `kwargs`."""
        for (css, encoding, use_bom, expect_error, kwargs) in [
            params('', 'utf8'),  # default to utf8
            params('�', 'utf8'),
            params('é', 'latin1'),  # utf8 fails, fall back on ShiftJIS
            params('£', 'ShiftJIS', expect_error=True),
            params('£', 'ShiftJIS', protocol_encoding='Shift-JIS'),
            params('£', 'ShiftJIS', linking_encoding='Shift-JIS'),
            params('£', 'ShiftJIS', document_encoding='Shift-JIS'),
            params('£', 'ShiftJIS', protocol_encoding='utf8',
                   document_encoding='ShiftJIS'),
            params('@charset "utf8"; £', 'ShiftJIS', expect_error=True),
            params('@charset "utf£8"; £', 'ShiftJIS', expect_error=True),
            params('@charset "unknown-encoding"; £', 'ShiftJIS', expect_error=True),
            params('@charset "utf8"; £', 'ShiftJIS', document_encoding='ShiftJIS'),
            params('£', 'ShiftJIS', linking_encoding='utf8',
                   document_encoding='ShiftJIS'),
            params('@charset "utf-32"; �', 'utf-32-be'),
            params('@charset "Shift-JIS"; £', 'ShiftJIS'),
            params('@charset "ISO-8859-8"; £', 'ShiftJIS', expect_error=True),
            params('�', 'utf-16-le', expect_error=True),  # no BOM
            params('�', 'utf-16-le', use_bom=True),
            params('�', 'utf-32-be', expect_error=True),
            params('�', 'utf-32-be', use_bom=True),
            params('�', 'utf-32-be', document_encoding='utf-32-be'),
            params('�', 'utf-32-be', linking_encoding='utf-32-be'),
            params('@charset "utf-32-le"; �', 'utf-32-be',
                   use_bom=True, expect_error=True),
            # protocol_encoding takes precedence over @charset
            params('@charset "ISO-8859-8"; £', 'ShiftJIS',
                   protocol_encoding='Shift-JIS'),
            params('@charset "unknown-encoding"; £', 'ShiftJIS',
                   protocol_encoding='Shift-JIS'),
            params('@charset "Shift-JIS"; £', 'ShiftJIS',
                   protocol_encoding='utf8'),
            # @charset takes precedence over document_encoding
            params('@charset "Shift-JIS"; £', 'ShiftJIS',
                   document_encoding='ISO-8859-8'),
            # @charset takes precedence over linking_encoding
            params('@charset "Shift-JIS"; £', 'ShiftJIS',
                   linking_encoding='ISO-8859-8'),
            # linking_encoding takes precedence over document_encoding
            params('£', 'ShiftJIS',
                   linking_encoding='Shift-JIS', document_encoding='ISO-8859-8'),
        ]:
            if use_bom:
                # Prepend a byte-order mark before encoding
                source = '\ufeff' + css
            else:
                source = css
            css_bytes = source.encode(encoding)
            result, result_encoding = decode(css_bytes, **kwargs)
            if expect_error:
                self.assertNotEqual(result, css)
            else:
                self.ae(result, css)
| 3,474 | Python | .py | 65 | 41.015385 | 85 | 0.555262 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,851 | css21.py | kovidgoyal_calibre/src/tinycss/tests/css21.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import io
import os
import tempfile
from tinycss.css21 import CSS21Parser
from tinycss.tests import BaseTest
from tinycss.tests.tokenizing import jsonify
class CoreParser(CSS21Parser):
    """A parser that always accepts unparsed at-rules."""

    def parse_at_rule(self, rule, stylesheet_rules, errors, context):
        # Return the raw rule unchanged instead of validating it, so tests
        # can inspect at-rules that CSS21Parser itself would reject.
        return rule
def parse_bytes(css_bytes, kwargs):
    """Parse a stylesheet directly from a byte string."""
    parser = CSS21Parser()
    return parser.parse_stylesheet_bytes(css_bytes, **kwargs)
def parse_bytesio_file(css_bytes, kwargs):
    """Parse a stylesheet from an in-memory binary file object."""
    return CSS21Parser().parse_stylesheet_file(io.BytesIO(css_bytes), **kwargs)
def parse_filename(css_bytes, kwargs):
    """Parse a stylesheet from an on-disk file, removing it afterwards."""
    tmp = tempfile.NamedTemporaryFile(delete=False)
    try:
        with tmp:
            tmp.write(css_bytes)
        # The file is closed before parsing: Windows cannot open the
        # filename a second time while it is still open for writing.
        return CSS21Parser().parse_stylesheet_file(tmp.name, **kwargs)
    finally:
        os.remove(tmp.name)
class TestCSS21(BaseTest):
    """Data-driven tests for the CSS 2.1 core parser."""

    def test_bytes(self):
        """Encoding detection via each of the three parse entry points."""
        for (css_bytes, kwargs, expected_result, parse) in [
            params + (parse,)
            for parse in [parse_bytes, parse_bytesio_file, parse_filename]
            for params in [
                ('@import "é";'.encode('utf8'), {}, 'é'),
                ('@import "é";'.encode('utf16'), {}, 'é'),  # with a BOM
                ('@import "é";'.encode('latin1'), {}, 'é'),
                ('@import "£";'.encode('Shift-JIS'), {}, '\x81\x92'),  # latin1 mojibake
                ('@charset "Shift-JIS";@import "£";'.encode('Shift-JIS'), {}, '£'),
                (' @charset "Shift-JIS";@import "£";'.encode('Shift-JIS'), {},
                 '\x81\x92'),
                ('@import "£";'.encode('Shift-JIS'),
                 {'document_encoding': 'Shift-JIS'}, '£'),
                ('@import "£";'.encode('Shift-JIS'),
                 {'document_encoding': 'utf8'}, '\x81\x92'),
                ('@charset "utf8"; @import "£";'.encode('utf8'),
                 {'document_encoding': 'latin1'}, '£'),
                # Mojibake yay!
                (' @charset "utf8"; @import "é";'.encode('utf8'),
                 {'document_encoding': 'latin1'}, 'é'),
                ('@import "é";'.encode('utf8'), {'document_encoding': 'latin1'}, 'é'),
            ]
        ]:
            stylesheet = parse(css_bytes, kwargs)
            self.ae(stylesheet.rules[0].at_keyword, '@import')
            self.ae(stylesheet.rules[0].uri, expected_result)

    def test_at_rules(self):
        """@charset placement rules and unknown at-rule handling."""
        for (css_source, expected_rules, expected_errors) in [
            (' /* hey */\n', 0, []),
            ('foo {}', 1, []),
            ('foo{} @lipsum{} bar{}', 2,
                ['unknown at-rule in stylesheet context: @lipsum']),
            ('@charset "ascii"; foo {}', 1, []),
            (' @charset "ascii"; foo {}', 1, ['mis-placed or malformed @charset rule']),
            ('@charset ascii; foo {}', 1, ['mis-placed or malformed @charset rule']),
            ('foo {} @charset "ascii";', 1, ['mis-placed or malformed @charset rule']),
        ]:
            # Pass 'encoding' to allow @charset
            stylesheet = CSS21Parser().parse_stylesheet(css_source, encoding='utf8')
            self.assert_errors(stylesheet.errors, expected_errors)
            self.ae(len(stylesheet.rules), expected_rules)

    def test_core_parser(self):
        """Error recovery and rule structure via the permissive CoreParser."""
        for (css_source, expected_rules, expected_errors) in [
            (' /* hey */\n', [], []),
            ('foo{} /* hey */\n@bar;@baz{}',
                [('foo', []), ('@bar', [], None), ('@baz', [], [])], []),
            ('@import "foo.css"/**/;', [
                ('@import', [('STRING', 'foo.css')], None)], []),
            ('@import "foo.css"/**/', [
                ('@import', [('STRING', 'foo.css')], None)], []),
            ('@import "foo.css', [
                ('@import', [('STRING', 'foo.css')], None)], []),
            ('{}', [], ['empty selector']),
            ('a{b:4}', [('a', [('b', [('INTEGER', 4)])])], []),
            ('@page {\t b: 4; @margin}', [('@page', [], [
                ('S', '\t '), ('IDENT', 'b'), (':', ':'), ('S', ' '), ('INTEGER', 4),
                (';', ';'), ('S', ' '), ('ATKEYWORD', '@margin'),
            ])], []),
            ('foo', [], ['no declaration block found']),
            ('foo @page {} bar {}', [('bar', [])],
                ['unexpected ATKEYWORD token in selector']),
            ('foo { content: "unclosed string;\n color:red; ; margin/**/\n: 2cm; }',
                [('foo', [('margin', [('DIMENSION', 2)])])],
                ['unexpected BAD_STRING token in property value']),
            ('foo { 4px; bar: 12% }',
                [('foo', [('bar', [('PERCENTAGE', 12)])])],
                ['expected a property name, got DIMENSION']),
            ('foo { bar! 3cm auto ; baz: 7px }',
                [('foo', [('baz', [('DIMENSION', 7)])])],
                ["expected ':', got DELIM"]),
            ('foo { bar ; baz: {("}"/* comment */) {0@fizz}} }',
                [('foo', [('baz', [('{', [
                    ('(', [('STRING', '}')]), ('S', ' '),
                    ('{', [('INTEGER', 0), ('ATKEYWORD', '@fizz')])
                ])])])],
                ["expected ':'"]),
            ('foo { bar: ; baz: not(z) }',
                [('foo', [('baz', [('FUNCTION', 'not', [('IDENT', 'z')])])])],
                ['expected a property value']),
            ('foo { bar: (]) ; baz: U+20 }',
                [('foo', [('baz', [('UNICODE-RANGE', 'U+20')])])],
                ['unmatched ] token in (']),
        ]:
            stylesheet = CoreParser().parse_stylesheet(css_source)
            self.assert_errors(stylesheet.errors, expected_errors)
            # At-rules are summarized as (keyword, head, body); rulesets as
            # (selector, declarations)
            result = [
                (rule.at_keyword, list(jsonify(rule.head)),
                    list(jsonify(rule.body))
                    if rule.body is not None else None)
                if rule.at_keyword else
                (rule.selector.as_css(), [
                    (decl.name, list(jsonify(decl.value)))
                    for decl in rule.declarations])
                for rule in stylesheet.rules
            ]
            self.ae(result, expected_rules)

    def test_parse_style_attr(self):
        """Declaration parsing of HTML style="" attribute content."""
        for (css_source, expected_declarations, expected_errors) in [
            (' /* hey */\n', [], []),
            ('b:4', [('b', [('INTEGER', 4)])], []),
            ('{b:4}', [], ['expected a property name, got {']),
            ('b:4} c:3', [], ['unmatched } token in property value']),
            (' 4px; bar: 12% ',
                [('bar', [('PERCENTAGE', 12)])],
                ['expected a property name, got DIMENSION']),
            ('bar! 3cm auto ; baz: 7px',
                [('baz', [('DIMENSION', 7)])],
                ["expected ':', got DELIM"]),
            ('foo; bar ; baz: {("}"/* comment */) {0@fizz}}',
                [('baz', [('{', [
                    ('(', [('STRING', '}')]), ('S', ' '),
                    ('{', [('INTEGER', 0), ('ATKEYWORD', '@fizz')])
                ])])],
                ["expected ':'", "expected ':'"]),
            ('bar: ; baz: not(z)',
                [('baz', [('FUNCTION', 'not', [('IDENT', 'z')])])],
                ['expected a property value']),
            ('bar: (]) ; baz: U+20',
                [('baz', [('UNICODE-RANGE', 'U+20')])],
                ['unmatched ] token in (']),
        ]:
            declarations, errors = CSS21Parser().parse_style_attr(css_source)
            self.assert_errors(errors, expected_errors)
            result = [(decl.name, list(jsonify(decl.value)))
                      for decl in declarations]
            self.ae(result, expected_declarations)

    def test_important(self):
        """Parsing of the !important priority, including escaped forms."""
        for (css_source, expected_declarations, expected_errors) in [
            (' /* hey */\n', [], []),
            ('a:1; b:2',
                [('a', [('INTEGER', 1)], None), ('b', [('INTEGER', 2)], None)], []),
            ('a:1 important; b: important',
                [('a', [('INTEGER', 1), ('S', ' '), ('IDENT', 'important')], None),
                 ('b', [('IDENT', 'important')], None)],
                []),
            ('a:1 !important; b:2',
                [('a', [('INTEGER', 1)], 'important'), ('b', [('INTEGER', 2)], None)],
                []),
            ('a:1!\t Im\\50 O\\RTant; b:2',
                [('a', [('INTEGER', 1)], 'important'), ('b', [('INTEGER', 2)], None)],
                []),
            ('a: !important; b:2',
                [('b', [('INTEGER', 2)], None)],
                ['expected a value before !important']),
        ]:
            declarations, errors = CSS21Parser().parse_style_attr(css_source)
            self.assert_errors(errors, expected_errors)
            result = [(decl.name, list(jsonify(decl.value)), decl.priority)
                      for decl in declarations]
            self.ae(result, expected_declarations)

    def test_at_import(self):
        """@import parsing: URI vs STRING heads, media lists, placement rules."""
        for (css_source, expected_rules, expected_errors) in [
            (' /* hey */\n', [], []),
            ('@import "foo.css";', [('foo.css', ['all'])], []),
            ('@import url(foo.css);', [('foo.css', ['all'])], []),
            ('@import "foo.css" screen, print;',
                [('foo.css', ['screen', 'print'])], []),
            ('@charset "ascii"; @import "foo.css"; @import "bar.css";',
                [('foo.css', ['all']), ('bar.css', ['all'])], []),
            ('foo {} @import "foo.css";',
                [], ['@import rule not allowed after a ruleset']),
            ('@page {} @import "foo.css";',
                [], ['@import rule not allowed after an @page rule']),
            ('@import ;',
                [], ['expected URI or STRING for @import rule']),
            ('@import foo.css;',
                [], ['expected URI or STRING for @import rule, got IDENT']),
            ('@import "foo.css" {}',
                [], ["expected ';', got a block"]),
        ]:
            # Pass 'encoding' to allow @charset
            stylesheet = CSS21Parser().parse_stylesheet(css_source, encoding='utf8')
            self.assert_errors(stylesheet.errors, expected_errors)
            result = [
                (rule.uri, rule.media)
                for rule in stylesheet.rules
                if rule.at_keyword == '@import'
            ]
            self.ae(result, expected_rules)

    def test_at_page(self):
        """@page selectors, specificity and declaration parsing (CSS 2.1)."""
        for (css, expected_result, expected_errors) in [
            ('@page {}', (None, (0, 0), []), []),
            ('@page:first {}', ('first', (1, 0), []), []),
            ('@page :left{}', ('left', (0, 1), []), []),
            ('@page\t\n:right {}', ('right', (0, 1), []), []),
            ('@page :last {}', None, ['invalid @page selector']),
            ('@page : right {}', None, ['invalid @page selector']),
            ('@page table:left {}', None, ['invalid @page selector']),
            ('@page;', None, ['invalid @page rule: missing block']),
            ('@page { a:1; ; b: 2 }',
                (None, (0, 0), [('a', [('INTEGER', 1)]), ('b', [('INTEGER', 2)])]),
                []),
            ('@page { a:1; c: ; b: 2 }',
                (None, (0, 0), [('a', [('INTEGER', 1)]), ('b', [('INTEGER', 2)])]),
                ['expected a property value']),
            ('@page { a:1; @top-left {} b: 2 }',
                (None, (0, 0), [('a', [('INTEGER', 1)]), ('b', [('INTEGER', 2)])]),
                ['unknown at-rule in @page context: @top-left']),
            ('@page { a:1; @top-left {}; b: 2 }',
                (None, (0, 0), [('a', [('INTEGER', 1)]), ('b', [('INTEGER', 2)])]),
                ['unknown at-rule in @page context: @top-left']),
        ]:
            stylesheet = CSS21Parser().parse_stylesheet(css)
            self.assert_errors(stylesheet.errors, expected_errors)
            if expected_result is None:
                self.assertFalse(stylesheet.rules)
            else:
                self.ae(len(stylesheet.rules), 1)
                rule = stylesheet.rules[0]
                self.ae(rule.at_keyword, '@page')
                self.ae(rule.at_rules, [])  # in CSS 2.1
                result = (
                    rule.selector,
                    rule.specificity,
                    [(decl.name, list(jsonify(decl.value)))
                        for decl in rule.declarations],
                )
                self.ae(result, expected_result)

    def test_at_media(self):
        """@media media lists and nesting restrictions (CSS 2.1)."""
        for (css_source, expected_rules, expected_errors) in [
            (' /* hey */\n', [], []),
            ('@media {}', [(['all'], [])], []),
            ('@media all {}', [(['all'], [])], []),
            ('@media screen, print {}', [(['screen', 'print'], [])], []),
            ('@media all;', [], ['invalid @media rule: missing block']),
            ('@media 4 {}', [], ['expected a media type, got INTEGER']),
            ('@media , screen {}', [], ['expected a media type']),
            ('@media screen, {}', [], ['expected a media type']),
            ('@media screen print {}', [],
                ['expected a media type, got IDENT, IDENT']),
            ('@media all { @page { a: 1 } @media; @import; foo { a: 1 } }',
                [(['all'], [('foo', [('a', [('INTEGER', 1)])])])],
                ['@page rule not allowed in @media',
                 '@media rule not allowed in @media',
                 '@import rule not allowed in @media']),
        ]:
            stylesheet = CSS21Parser().parse_stylesheet(css_source)
            self.assert_errors(stylesheet.errors, expected_errors)
            for rule in stylesheet.rules:
                self.ae(rule.at_keyword, '@media')
            result = [
                (rule.media, [
                    (sub_rule.selector.as_css(), [
                        (decl.name, list(jsonify(decl.value)))
                        for decl in sub_rule.declarations])
                    for sub_rule in rule.rules
                ])
                for rule in stylesheet.rules
            ]
            self.ae(result, expected_rules)
| 14,361 | Python | .py | 281 | 36.75089 | 92 | 0.442548 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,852 | __init__.py | kovidgoyal_calibre/src/tinycss/tests/__init__.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import unittest
def jsonify(tokens):
    """Turn tokens into "JSON-compatible" data structures."""
    for tok in tokens:
        kind = tok.type
        if kind == 'FUNCTION':
            # Function tokens carry a name plus nested content tokens.
            yield kind, tok.function_name, list(jsonify(tok.content))
        elif tok.is_container:
            # Container tokens such as (), [] and {} recurse into content.
            yield kind, list(jsonify(tok.content))
        else:
            yield kind, tok.value
class BaseTest(unittest.TestCase):
    """Shared helpers for the tinycss test suite."""

    longMessage = True
    maxDiff = None
    ae = unittest.TestCase.assertEqual

    def assert_errors(self, errors, expected_errors):
        """Test not complete error messages but only substrings."""
        self.ae(len(errors), len(expected_errors))
        for got, fragment in zip(errors, expected_errors):
            self.assertIn(fragment, type(u'')(got))

    def jsonify_declarations(self, rule):
        """Return (name, JSON-ified value) pairs for a rule's declarations."""
        return [(d.name, list(jsonify(d.value))) for d in rule.declarations]
| 1,100 | Python | .py | 27 | 33.185185 | 67 | 0.65381 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,853 | fonts3.py | kovidgoyal_calibre/src/tinycss/tests/fonts3.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from polyglot.builtins import iteritems
from tinycss.fonts3 import CSSFonts3Parser, parse_font, parse_font_family, serialize_font
from tinycss.tests import BaseTest
class TestFonts3(BaseTest):
    """Tests for the CSS Fonts 3 parser and font shorthand helpers."""

    def test_font_face(self):
        'Test parsing of font face rules'
        for css, expected_declarations, expected_errors in [
            ('@font-face {}', [], []),
            ('@font-face { font-family: Moose; src: url(font1.ttf) }',
             [('font-family', [('IDENT', 'Moose')]), ('src', [('URI', 'font1.ttf')])], []),
        ]:
            stylesheet = CSSFonts3Parser().parse_stylesheet(css)
            self.assert_errors(stylesheet.errors, expected_errors)
            self.ae(len(stylesheet.rules), 1)
            rule = stylesheet.rules[0]
            self.ae(self.jsonify_declarations(rule), expected_declarations)
        # An @font-face rule without a block is an error
        stylesheet = CSSFonts3Parser().parse_stylesheet('@font-face;')
        self.assert_errors(stylesheet.errors, ['missing block'])

    def test_parse_font_family(self):
        ' Test parsing of font-family values '
        for raw, q in iteritems({
            '"1as"': ['1as'],
            'A B C, serif': ['A B C', 'serif'],
            r'Red\/Black': ['Red/Black'],
            'A B': ['A B'],
            r'Ahem\!': ['Ahem!'],
            r'"Ahem!"': ['Ahem!'],
            '€42': ['€42'],
            r'Hawaii\ 5-0': ['Hawaii 5-0'],
            r'"X \"Y"': ['X "Y'],
            'A B, C D, "E", serif': ['A B', 'C D', 'E', 'serif'],
            '': [],
            '"", a': ['a'],
        }):
            self.ae(q, parse_font_family(raw))
        # Single family names parse to a one-element list
        for single in ('serif', 'sans-serif', 'A B C'):
            self.ae([single], parse_font_family(single))

    def test_parse_font(self):
        # t() checks that parse_font produces the expected property dict and
        # that serialize_font round-trips back to the same dict.
        def t(raw, **kw):
            q = {('line' if k == 'height' else 'font') + '-' + k:v for k, v in iteritems(kw)}
            self.ae(q, parse_font(raw))
            self.ae(q, parse_font(serialize_font(q)))
        t('caption', family=['sans-serif'])
        t('serif', family=['serif'])
        t('12pt/14pt sans-serif', size='12pt', height='14pt', family=['sans-serif'])
        t('80% sans-serif', size='80%', family=['sans-serif'])
        t('x-large/110% "new century schoolbook", serif', size='x-large', height='110%', family=['new century schoolbook', 'serif'])
        t('bold italic large Palatino, serif', weight='bold', style='italic', size='large', family=['Palatino', 'serif'])
        t('normal small-caps 120%/120% fantasy', style='normal', variant='small-caps', size='120%', height='120%', family=['fantasy'])
        t('condensed oblique 12pt Helvetica Neue, serif', stretch='condensed', style='oblique', size='12pt', family=['Helvetica Neue', 'serif'])
        t('300 italic 1.3em/1.7em FB Armada, sans-serif', weight='300', style='italic', size='1.3em', height='1.7em', family=['FB Armada', 'sans-serif'])
| 3,066 | Python | .py | 55 | 44.945455 | 153 | 0.551218 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,854 | page3.py | kovidgoyal_calibre/src/tinycss/tests/page3.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.page3 import CSSPage3Parser
from tinycss.tests import BaseTest
class TestPage3(BaseTest):
    """Tests for the CSS Paged Media 3 parser (@page and margin rules)."""

    def test_selectors(self):
        """@page selector forms and their (name, pseudo-class) specificity."""
        for css, expected_selector, expected_specificity, expected_errors in [
            ('@page {}', (None, None), (0, 0, 0), []),
            ('@page :first {}', (None, 'first'), (0, 1, 0), []),
            ('@page:left{}', (None, 'left'), (0, 0, 1), []),
            ('@page :right {}', (None, 'right'), (0, 0, 1), []),
            ('@page :blank{}', (None, 'blank'), (0, 1, 0), []),
            ('@page :last {}', None, None, ['invalid @page selector']),
            ('@page : first {}', None, None, ['invalid @page selector']),
            ('@page foo:first {}', ('foo', 'first'), (1, 1, 0), []),
            ('@page bar :left {}', ('bar', 'left'), (1, 0, 1), []),
            (r'@page \26:right {}', ('&', 'right'), (1, 0, 1), []),
            ('@page foo {}', ('foo', None), (1, 0, 0), []),
            (r'@page \26 {}', ('&', None), (1, 0, 0), []),
            ('@page foo fist {}', None, None, ['invalid @page selector']),
            ('@page foo, bar {}', None, None, ['invalid @page selector']),
            ('@page foo&first {}', None, None, ['invalid @page selector']),
        ]:
            stylesheet = CSSPage3Parser().parse_stylesheet(css)
            self.assert_errors(stylesheet.errors, expected_errors)
            if stylesheet.rules:
                self.ae(len(stylesheet.rules), 1)
                rule = stylesheet.rules[0]
                self.ae(rule.at_keyword, '@page')
                selector = rule.selector
                self.ae(rule.specificity, expected_specificity)
            else:
                # Invalid selectors produce no rule at all
                selector = None
            self.ae(selector, expected_selector)

    def test_content(self):
        """Declarations and nested margin at-rules inside @page blocks."""
        for css, expected_declarations, expected_rules, expected_errors in [
            ('@page {}', [], [], []),
            ('@page { foo: 4; bar: z }',
             [('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])], [], []),
            ('''@page { foo: 4;
                @top-center { content: "Awesome Title" }
                @bottom-left { content: counter(page) }
                bar: z
            }''',
             [('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])],
             [('@top-center', [('content', [('STRING', 'Awesome Title')])]),
              ('@bottom-left', [('content', [
                  ('FUNCTION', 'counter', [('IDENT', 'page')])])])],
             []),
            ('''@page { foo: 4;
                @bottom-top { content: counter(page) }
                bar: z
            }''',
             [('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])],
             [],
             ['unknown at-rule in @page context: @bottom-top']),
            ('@page{} @top-right{}', [], [], [
                '@top-right rule not allowed in stylesheet']),
            ('@page{ @top-right 4 {} }', [], [], [
                'unexpected INTEGER token in @top-right rule header']),
            # Not much error recovery tests here. This should be covered in test_css21
        ]:
            stylesheet = CSSPage3Parser().parse_stylesheet(css)
            self.assert_errors(stylesheet.errors, expected_errors)
            self.ae(len(stylesheet.rules), 1)
            rule = stylesheet.rules[0]
            self.ae(rule.at_keyword, '@page')
            self.ae(self.jsonify_declarations(rule), expected_declarations)
            rules = [(margin_rule.at_keyword, self.jsonify_declarations(margin_rule))
                     for margin_rule in rule.at_rules]
            self.ae(rules, expected_rules)
| 3,818 | Python | .py | 73 | 38.671233 | 86 | 0.469579 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,855 | main.py | kovidgoyal_calibre/src/tinycss/tests/main.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import argparse
import unittest
def find_tests():
    # Imported lazily so that calibre's test utilities are only loaded when
    # the test suite is actually run.
    from calibre.utils.run_tests import find_tests_in_package
    return find_tests_in_package('tinycss.tests')
def run_tests(find_tests=find_tests, for_build=False):
    """Discover and run the tinycss test suite.

    :param find_tests: callable returning the default suite of tests to run
    :param for_build: when True, run quietly with failfast (build/CI mode);
        no command line arguments are parsed in this mode
    :raises SystemExit: when a named test cannot be found, or (in build
        mode) when the run has errors or failures
    """
    args = None
    if not for_build:
        parser = argparse.ArgumentParser()
        parser.add_argument('name', nargs='?', default=None,
                            help='The name of the test to run')
        args = parser.parse_args()
    if not for_build and args.name and args.name.startswith('.'):
        # A leading dot means: search all suites for a single test method
        tests = find_tests()
        q = args.name[1:]
        if not q.startswith('test_'):
            q = 'test_' + q
        ans = None
        try:
            for suite in tests:
                for test in suite._tests:
                    if test.__class__.__name__ == 'ModuleImportFailure':
                        raise Exception('Failed to import a test module: %s' % test)
                    for s in test:
                        if s._testMethodName == q:
                            ans = s
                            raise StopIteration()
        except StopIteration:
            pass
        if ans is None:
            print('No test named %s found' % args.name)
            raise SystemExit(1)
        tests = ans
    else:
        tests = unittest.defaultTestLoader.loadTestsFromName(args.name) if not for_build and args.name else find_tests()
    r = unittest.TextTestRunner
    if for_build:
        # Quiet, buffered, stop at the first failure: suitable for CI
        r = r(verbosity=0, buffer=True, failfast=True)
    else:
        r = r(verbosity=4)
    result = r.run(tests)
    # Parenthesized deliberately: the previous form
    # `for_build and result.errors or result.failures` bound as
    # `(for_build and result.errors) or result.failures`, which exited
    # non-zero on failures even outside build mode while ignoring errors.
    if for_build and (result.errors or result.failures):
        raise SystemExit(1)
# Allow running this module directly as a test-runner script.
if __name__ == '__main__':
    run_tests()
| 1,810 | Python | .py | 48 | 27.9375 | 120 | 0.571021 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,856 | tokenizing.py | kovidgoyal_calibre/src/tinycss/tests/tokenizing.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.tests import BaseTest, jsonify
from tinycss.tokenizer import c_tokenize_flat, python_tokenize_flat, regroup
# Run every test against the pure-Python tokenizer, and also against the
# C-accelerated tokenizer when the compiled extension is available.
if c_tokenize_flat is None:
    tokenizers = (python_tokenize_flat,)
else:
    tokenizers = (python_tokenize_flat, c_tokenize_flat)
def token_api(self, tokenize):
    """Each source is one container/function token with 7 content tokens."""
    for css_source in [
        '(8, foo, [z])', '[8, foo, (z)]', '{8, foo, [z]}', 'func(8, foo, [z])'
    ]:
        tokens = list(regroup(tokenize(css_source)))
        self.ae(len(tokens), 1)
        self.ae(len(tokens[0].content), 7)
def token_serialize_css(self, tokenize):
    """Serializing tokens with as_css() must round-trip the source exactly."""
    # NOTE(review): the `tokenize` parameter is immediately shadowed by the
    # loop variable below, so every call re-tests all tokenizers; since
    # TestTokenizer.run_test already calls this once per tokenizer the work
    # is duplicated — confirm whether the inner loop is intentional.
    for tokenize in tokenizers:
        for css_source in [
            r'''p[example="\
foo(int x) {\
    this.x = x;\
}\
"]''',
            '"Lorem\\26Ipsum\ndolor" sit',
            '/* Lorem\nipsum */\fa {\n    color: red;\tcontent: "dolor\\\fsit" }',
            'not([[lorem]]{ipsum (42)})',
            'a[b{d]e}',
            'a[b{"d',
        ]:
            # Round-trip must hold both with and without regrouping of
            # container tokens.
            for _regroup in (regroup, lambda x: x):
                tokens = _regroup(tokenize(css_source, ignore_comments=False))
                result = ''.join(token.as_css() for token in tokens)
                self.ae(result, css_source)
def comments(self, tokenize):
    """Comment and bad-comment tokens, with and without ignore_comments."""
    for ignore_comments, expected_tokens in [
        (False, [
            ('COMMENT', '/* lorem */'),
            ('S', ' '),
            ('IDENT', 'ipsum'),
            ('[', [
                ('IDENT', 'dolor'),
                ('COMMENT', '/* sit */'),
            ]),
            # The unterminated trailing comment becomes a BAD_COMMENT token
            ('BAD_COMMENT', '/* amet')
        ]),
        (True, [
            ('S', ' '),
            ('IDENT', 'ipsum'),
            ('[', [
                ('IDENT', 'dolor'),
            ]),
        ]),
    ]:
        css_source = '/* lorem */ ipsum[dolor/* sit */]/* amet'
        tokens = regroup(tokenize(css_source, ignore_comments))
        result = list(jsonify(tokens))
        self.ae(result, expected_tokens)
def token_grouping(self, tokenize):
    """Nesting of (), [] and {} container tokens, including error recovery."""
    for css_source, expected_tokens in [
        ('', []),
        (r'Lorem\26 "i\psum"4px', [
            ('IDENT', 'Lorem&'), ('STRING', 'ipsum'), ('DIMENSION', 4)]),
        ('not([[lorem]]{ipsum (42)})', [
            ('FUNCTION', 'not', [
                ('[', [
                    ('[', [
                        ('IDENT', 'lorem'),
                    ]),
                ]),
                ('{', [
                    ('IDENT', 'ipsum'),
                    ('S', ' '),
                    ('(', [
                        ('INTEGER', 42),
                    ])
                ])
            ])]),
        # Close everything at EOF, no error
        ('a[b{"d', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('STRING', 'd'),
                ]),
            ]),
        ]),
        # Any remaining ), ] or } token is a nesting error
        ('a[b{d]e}', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('IDENT', 'd'),
                    (']', ']'),  # The error is visible here
                    ('IDENT', 'e'),
                ]),
            ]),
        ]),
        # ref:
        ('a[b{d}e]', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('IDENT', 'd'),
                ]),
                ('IDENT', 'e'),
            ]),
        ]),
    ]:
        tokens = regroup(tokenize(css_source, ignore_comments=False))
        result = list(jsonify(tokens))
        self.ae(result, expected_tokens)
def positions(self, tokenize):
    """Each token must report the correct 1-based (line, column) position."""
    # Note: \f counts as a newline for position tracking, per the
    # assertions below (the 'a' after \f is reported on line 3).
    css = '/* Lorem\nipsum */\fa {\n    color: red;\tcontent: "dolor\\\fsit" }'
    tokens = tokenize(css, ignore_comments=False)
    result = [(token.type, token.line, token.column) for token in tokens]
    self.ae(result, [
        ('COMMENT', 1, 1), ('S', 2, 9),
        ('IDENT', 3, 1), ('S', 3, 2), ('{', 3, 3),
        ('S', 3, 4), ('IDENT', 4, 5), (':', 4, 10),
        ('S', 4, 11), ('IDENT', 4, 12), (';', 4, 15), ('S', 4, 16),
        ('IDENT', 4, 17), (':', 4, 24), ('S', 4, 25), ('STRING', 4, 26),
        ('S', 5, 5), ('}', 5, 6)])
def tokens(self, tokenize):
    """Flat tokenization: types, values, units, escaping and bad strings."""
    for css_source, expected_tokens in [
        ('', []),
        ('red -->',
            [('IDENT', 'red'), ('S', ' '), ('CDC', '-->')]),
        # Longest match rule: no CDC
        ('red-->',
            [('IDENT', 'red--'), ('DELIM', '>')]),
        (r'''p[example="\
foo(int x) {\
    this.x = x;\
}\
"]''', [
            ('IDENT', 'p'),
            ('[', '['),
            ('IDENT', 'example'),
            ('DELIM', '='),
            ('STRING', 'foo(int x) { this.x = x;}'),
            (']', ']')]),
        # Numbers are parsed
        ('42 .5 -4pX 1.25em 30%',
            [('INTEGER', 42), ('S', ' '),
             ('NUMBER', .5), ('S', ' '),
             # units are normalized to lower-case:
             ('DIMENSION', -4, 'px'), ('S', ' '),
             ('DIMENSION', 1.25, 'em'), ('S', ' '),
             ('PERCENTAGE', 30, '%')]),
        # URLs are extracted
        ('url(foo.png)', [('URI', 'foo.png')]),
        ('url("foo.png")', [('URI', 'foo.png')]),
        # Escaping
        (r'/* Comment with a \ backslash */',
            [('COMMENT', r'/* Comment with a \ backslash */')]),  # Unchanged
        # backslash followed by a newline in a string: ignored
        ('"Lorem\\\nIpsum"', [('STRING', 'LoremIpsum')]),
        # backslash followed by a newline outside a string: stands for itself
        ('Lorem\\\nIpsum', [
            ('IDENT', 'Lorem'), ('DELIM', '\\'),
            ('S', '\n'), ('IDENT', 'Ipsum')]),
        # Cancel the meaning of special characters
        (r'"Lore\m Ipsum"', [('STRING', 'Lorem Ipsum')]),  # or not special
        (r'"Lorem \49psum"', [('STRING', 'Lorem Ipsum')]),
        (r'"Lorem \49 psum"', [('STRING', 'Lorem Ipsum')]),
        (r'"Lorem\"Ipsum"', [('STRING', 'Lorem"Ipsum')]),
        (r'"Lorem\\Ipsum"', [('STRING', r'Lorem\Ipsum')]),
        (r'"Lorem\5c Ipsum"', [('STRING', r'Lorem\Ipsum')]),
        (r'Lorem\+Ipsum', [('IDENT', 'Lorem+Ipsum')]),
        (r'Lorem+Ipsum', [('IDENT', 'Lorem'), ('DELIM', '+'), ('IDENT', 'Ipsum')]),
        (r'url(foo\).png)', [('URI', 'foo).png')]),
        # Unicode and backslash escaping
        ('\\26 B', [('IDENT', '&B')]),
        ('\\&B', [('IDENT', '&B')]),
        ('@\\26\tB', [('ATKEYWORD', '@&B')]),
        ('@\\&B', [('ATKEYWORD', '@&B')]),
        ('#\\26\nB', [('HASH', '#&B')]),
        ('#\\&B', [('HASH', '#&B')]),
        ('\\26\r\nB(', [('FUNCTION', '&B(')]),
        ('\\&B(', [('FUNCTION', '&B(')]),
        (r'12.5\000026B', [('DIMENSION', 12.5, '&b')]),
        (r'12.5\0000263B', [('DIMENSION', 12.5, '&3b')]),  # max 6 digits
        (r'12.5\&B', [('DIMENSION', 12.5, '&b')]),
        (r'"\26 B"', [('STRING', '&B')]),
        (r"'\000026B'", [('STRING', '&B')]),
        (r'"\&B"', [('STRING', '&B')]),
        (r'url("\26 B")', [('URI', '&B')]),
        (r'url(\26 B)', [('URI', '&B')]),
        (r'url("\&B")', [('URI', '&B')]),
        (r'url(\&B)', [('URI', '&B')]),
        (r'Lorem\110000Ipsum', [('IDENT', 'Lorem\uFFFDIpsum')]),
        # Bad strings
        # String ends at EOF without closing: no error, parsed
        ('"Lorem\\26Ipsum', [('STRING', 'Lorem&Ipsum')]),
        # Unescaped newline: ends the string, error, unparsed
        ('"Lorem\\26Ipsum\n', [
            ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n')]),
        # Tokenization restarts after the newline, so the second " starts
        # a new string (which ends at EOF without errors, as above.)
        ('"Lorem\\26Ipsum\ndolor" sit', [
            ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n'),
            ('IDENT', 'dolor'), ('STRING', ' sit')]),
    ]:
        sources = [css_source]
        for css_source in sources:
            tokens = tokenize(css_source, ignore_comments=False)
            # A token's unit (if any) is appended to its (type, value) pair
            result = [
                (token.type, token.value) + (
                    () if token.unit is None else (token.unit,))
                for token in tokens
            ]
            self.ae(result, expected_tokens)
class TestTokenizer(BaseTest):
    """Run every tokenizer check against each available tokenizer backend."""

    def run_test(self, func):
        """Invoke *func* once per tokenizer implementation in ``tokenizers``."""
        for tokenizer in tokenizers:
            func(self, tokenizer)

    def test_token_api(self):
        self.run_test(token_api)

    def test_token_serialize_css(self):
        self.run_test(token_serialize_css)

    def test_comments(self):
        self.run_test(comments)

    def test_token_grouping(self):
        self.run_test(token_grouping)

    def test_positions(self):
        """Test the reported line/column position of each token."""
        self.run_test(positions)

    def test_tokens(self):
        self.run_test(tokens)
| 8,863 | Python | .py | 234 | 27.474359 | 83 | 0.430318 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,857 | planet_python.recipe | kovidgoyal_calibre/recipes/planet_python.recipe | from calibre.web.feeds.news import AutomaticNewsRecipe
class BasicUserRecipe(AutomaticNewsRecipe):
    """Automatically generated news recipe for the Planet Python feed
    aggregator (blog posts from the Python community)."""
    title = u'Planet Python'
    language = 'en'
    __author__ = 'Jelle van der Waa'
    # Ignore articles older than this many days
    oldest_article = 10
    # Cap on how many articles are fetched from the feed
    max_articles_per_feed = 100
    # (display title, RSS URL) pairs consumed by AutomaticNewsRecipe
    feeds = [(u'Planet Python', u'http://planetpython.org/rss20.xml')]
| 314 | Python | .py | 8 | 35 | 70 | 0.713816 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,858 | default_tweaks.py | kovidgoyal_calibre/resources/default_tweaks.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
# License: GPLv3 Copyright: 2010, Kovid Goyal <kovid at kovidgoyal.net>
# Contains various tweaks that affect calibre behavior. Only edit this file if
# you know what you are doing. If you delete this file, it will be recreated from
# defaults.
#: Auto increment series index
# The algorithm used to assign a book added to an existing series a series number.
# New series numbers assigned using this tweak are always integer values, except
# if a constant non-integer is specified.
# Possible values are:
# next - First available integer larger than the largest existing number
# first_free - First available integer larger than 0
# next_free - First available integer larger than the smallest existing number
# last_free - First available integer smaller than the largest existing number. Return largest existing + 1 if no free number is found
# const - Assign the number 1 always
# no_change - Do not change the series index
# a number - Assign that number always. The number is not in quotes. Note that 0.0 can be used here.
# Examples:
# series_index_auto_increment = 'next'
# series_index_auto_increment = 'next_free'
# series_index_auto_increment = 16.5
#
# Set the use_series_auto_increment_tweak_when_importing tweak to True to
# use the above values when importing/adding books. If this tweak is set to
# False (the default) then the series number will be set to 1 if it is not
# explicitly set during the import. If set to True, then the
# series index will be set according to the series_index_auto_increment setting.
# Note that the use_series_auto_increment_tweak_when_importing tweak is used
# only when a value is not provided during import. If the importing regular
# expression produces a value for series_index, or if you are reading metadata
# from books and the import plugin produces a value, then that value will
# be used irrespective of the setting of the tweak.
series_index_auto_increment = 'next'
use_series_auto_increment_tweak_when_importing = False
#: Add separator after completing an author name
# Set this if the completion separator should be appended to the end of the
# completed text to automatically begin a new completion operation for authors.
# It can be either True or False
authors_completer_append_separator = False
#: Author sort name algorithm
# The algorithm used to copy author to author_sort.
# Possible values are:
# invert: use "fn ln" -> "ln, fn"
# copy : copy author to author_sort without modification
# comma : use 'copy' if there is a ',' in the name, otherwise use 'invert'
# nocomma : "fn ln" -> "ln fn" (without the comma)
# When this tweak is changed, the author_sort values stored with each author
# must be recomputed by right-clicking on an author in the left-hand tags
# panel, selecting 'Manage authors', and pressing
# 'Recalculate all author sort values'.
#
# The author_name_suffixes are words that are ignored when they occur at the
# end of an author name. The case of the suffix is ignored and trailing
# periods are automatically handled.
#
# The same is true for author_name_prefixes.
#
# The author_name_copywords are a set of words which, if they occur in an
# author name, cause the automatically generated author sort string to be
# identical to the author's name. This means that the sort for a string like
# "Acme Inc." will be "Acme Inc." instead of "Inc., Acme".
#
# If author_use_surname_prefixes is enabled, any of the words in
# author_surname_prefixes will be treated as a prefix to the surname, if they
# occur before the surname. So for example, "John von Neumann" would be sorted
# as "von Neumann, John" and not "Neumann, John von".
author_sort_copy_method = 'comma'
author_name_suffixes = ('Jr', 'Sr', 'Inc', 'Ph.D', 'Phd',
'MD', 'M.D', 'I', 'II', 'III', 'IV',
'Junior', 'Senior')
author_name_prefixes = ('Mr', 'Mrs', 'Ms', 'Dr', 'Prof')
author_name_copywords = (
'Agency', 'Corporation', 'Company', 'Co.', 'Council',
'Committee', 'Inc.', 'Institute', 'National', 'Society', 'Club', 'Team',
'Software', 'Games', 'Entertainment', 'Media', 'Studios',
)
author_use_surname_prefixes = False
author_surname_prefixes = ('da', 'de', 'di', 'la', 'le', 'van', 'von')
#: Splitting multiple author names
# By default, calibre splits a string containing multiple author names on
# ampersands and the words "and" and "with". You can customize the splitting
# by changing the regular expression below. Strings are split on whatever the
# specified regular expression matches, in addition to ampersands.
# Default: r'(?i),?\s+(and|with)\s+'
authors_split_regex = r'(?i),?\s+(and|with)\s+'
#: Use author sort in Tag browser
# Set which author field to display in the Tag browser (the list of authors,
# series, publishers etc on the left hand side). The choices are author and
# author_sort. This tweak affects only what is displayed under the authors
# category in the Tag browser and Content server. Please note that if you set this
# to author_sort, it is very possible to see duplicate names in the list because
# although it is guaranteed that author names are unique, there is no such
# guarantee for author_sort values. Showing duplicates won't break anything, but
# it could lead to some confusion. When using 'author_sort', the tooltip will
# show the author's name.
# Examples:
# categories_use_field_for_author_name = 'author'
# categories_use_field_for_author_name = 'author_sort'
categories_use_field_for_author_name = 'author'
#: Control partitioning of Tag browser
# When partitioning the Tag browser, the format of the subcategory label is
# controlled by a template: categories_collapsed_name_template if sorting by
# name, categories_collapsed_rating_template if sorting by average rating, and
# categories_collapsed_popularity_template if sorting by popularity. There are
# two variables available to the template: first and last. The variable 'first'
# is the initial item in the subcategory, and the variable 'last' is the final
# item in the subcategory. Both variables are 'objects'; they each have multiple
# values that are obtained by using a suffix. For example, first.name for an
# author category will be the name of the author. The sub-values available are:
# name: the printable name of the item
# count: the number of books that references this item
# avg_rating: the average rating of all the books referencing this item
# sort: the sort value. For authors, this is the author_sort for that author
# category: the category (e.g., authors, series) that the item is in.
# Note that the "r'" in front of the { is necessary if there are backslashes
# (\ characters) in the template. It doesn't hurt anything to leave it there
# even if there aren't any backslashes.
categories_collapsed_name_template = r'{first.sort:shorten(4,,0)} - {last.sort:shorten(4,,0)}'
categories_collapsed_rating_template = r'{first.avg_rating:4.2f:ifempty(0)} - {last.avg_rating:4.2f:ifempty(0)}'
categories_collapsed_popularity_template = r'{first.count:d} - {last.count:d}'
#: Specify columns to sort the booklist by on startup
# Provide a set of columns to be sorted on when calibre starts.
# The argument is None if saved sort history is to be used
# otherwise it is a list of column,order pairs. Column is the
# lookup/search name, found using the tooltip for the column
# Order is 0 for ascending, 1 for descending.
# For example, set it to [('authors',0),('title',0)] to sort by
# title within authors.
sort_columns_at_startup = None
#: Control how dates are displayed
# Format to be used for publication date and the timestamp (date).
# A string controlling how the publication date is displayed in the GUI
# d the day as number without a leading zero (1 to 31)
# dd the day as number with a leading zero (01 to 31)
# ddd the abbreviated localized day name (e.g. 'Mon' to 'Sun').
# dddd the long localized day name (e.g. 'Monday' to 'Sunday').
# M the month as number without a leading zero (1-12)
# MM the month as number with a leading zero (01-12)
# MMM the abbreviated localized month name (e.g. 'Jan' to 'Dec').
# MMMM the long localized month name (e.g. 'January' to 'December').
# yy the year as two digit number (00-99)
# yyyy the year as four digit number
# h      the hours without a leading 0 (0 to 11 or 0 to 23, depending on am/pm)
# hh     the hours with a leading 0 (00 to 11 or 00 to 23, depending on am/pm)
# m      the minutes without a leading 0 (0 to 59)
# mm     the minutes with a leading 0 (00 to 59)
# s      the seconds without a leading 0 (0 to 59)
# ss     the seconds with a leading 0 (00 to 59)
# ap use a 12-hour clock instead of a 24-hour clock, with "ap" replaced by the localized string for am or pm
# AP use a 12-hour clock instead of a 24-hour clock, with "AP" replaced by the localized string for AM or PM
# iso the date with time and timezone. Must be the only format present
# For example, given the date of 9 Jan 2010, the following formats show
# MMM yyyy ==> Jan 2010 yyyy ==> 2010 dd MMM yyyy ==> 09 Jan 2010
# MM/yyyy ==> 01/2010 d/M/yy ==> 9/1/10 yy ==> 10
#
# publication default if not set: MMM yyyy
# timestamp default if not set: dd MMM yyyy
# last_modified_display_format if not set: dd MMM yyyy
gui_pubdate_display_format = 'MMM yyyy'
gui_timestamp_display_format = 'dd MMM yyyy'
gui_last_modified_display_format = 'dd MMM yyyy'
#: Control sorting of titles and series in the library display
# Control title and series sorting in the library view. If set to
# 'library_order', the title sort field will be used instead of the title.
# Unless you have manually edited the title sort field, leading articles such as
# The and A will be ignored. If set to 'strictly_alphabetic', the titles will be
# sorted as-is (sort by title instead of title sort). For example, with
# library_order, The Client will sort under 'C'. With strictly_alphabetic, the
# book will sort under 'T'.
# This flag affects calibre's library display. It has no effect on devices. In
# addition, titles for books added before changing the flag will retain their
# order until the title is edited. Editing a title and hitting Enter
# without changing anything is sufficient to change the sort. Or you can use
# the 'Update title sort' action in the Bulk metadata edit dialog to update
# it for many books at once.
title_series_sorting = 'library_order'
#: Control formatting of title and series when used in templates
# Control how title and series names are formatted when saving to disk/sending
# to device. The behavior depends on the field being processed. If processing
# title, then if this tweak is set to 'library_order', the title will be
# replaced with title_sort. If it is set to 'strictly_alphabetic', then the
# title will not be changed. If processing series, then if set to
# 'library_order', articles such as 'The' and 'An' will be moved to the end. If
# set to 'strictly_alphabetic', the series will be sent without change.
# For example, if the tweak is set to library_order, "The Lord of the Rings"
# will become "Lord of the Rings, The". If the tweak is set to
# strictly_alphabetic, it would remain "The Lord of the Rings". Note that the
# formatter function raw_field will return the base value for title and
# series regardless of the setting of this tweak.
save_template_title_series_sorting = 'library_order'
#: Set the list of words considered to be "articles" for sort strings
# Set the list of words that are to be considered 'articles' when computing the
# title sort strings. The articles differ by language. By default, calibre uses
# a combination of articles from English and whatever language the calibre user
# interface is set to. In addition, in some contexts where the book language is
# available, the language of the book is used. You can change the list of
# articles for a given language or add a new language by editing
# per_language_title_sort_articles. To tell calibre to use a language other
# than the user interface language, set, default_language_for_title_sort. For
# example, to use German, set it to 'deu'. A value of None means the user
# interface language is used. The setting title_sort_articles is ignored
# (present only for legacy reasons).
per_language_title_sort_articles = {
# English
'eng' : (r'A\s+', r'The\s+', r'An\s+'),
# Esperanto
'epo': (r'La\s+', r"L'", 'L´'),
# Spanish
'spa' : (r'El\s+', r'La\s+', r'Lo\s+', r'Los\s+', r'Las\s+', r'Un\s+',
r'Una\s+', r'Unos\s+', r'Unas\s+'),
# French
'fra' : (r'Le\s+', r'La\s+', r"L'", u'L´', u'L’', r'Les\s+', r'Un\s+', r'Une\s+',
r'Des\s+', r'De\s+La\s+', r'De\s+', r"D'", r'D´', r'D’'),
# Polish
'pol': (),
# Italian
'ita': ('Lo\\s+', 'Il\\s+', "L'", 'L´', 'La\\s+', 'Gli\\s+',
'I\\s+', 'Le\\s+', 'Uno\\s+', 'Un\\s+', 'Una\\s+', "Un'",
'Un´', 'Dei\\s+', 'Degli\\s+', 'Delle\\s+', 'Del\\s+',
'Della\\s+', 'Dello\\s+', "Dell'", 'Dell´'),
# Portuguese
'por' : (r'A\s+', r'O\s+', r'Os\s+', r'As\s+', r'Um\s+', r'Uns\s+',
r'Uma\s+', r'Umas\s+', ),
# Romanian
'ron' : (r'Un\s+', r'O\s+', r'Nişte\s+', ),
# German
'deu' : (r'Der\s+', r'Die\s+', r'Das\s+', r'Den\s+', r'Ein\s+',
r'Eine\s+', r'Einen\s+', r'Dem\s+', r'Des\s+', r'Einem\s+',
r'Eines\s+'),
# Dutch
'nld' : (r'De\s+', r'Het\s+', r'Een\s+', r"'n\s+", r"'s\s+", r'Ene\s+',
r'Ener\s+', r'Enes\s+', r'Den\s+', r'Der\s+', r'Des\s+',
r"'t\s+"),
# Swedish
'swe' : (r'En\s+', r'Ett\s+', r'Det\s+', r'Den\s+', r'De\s+', ),
# Turkish
'tur' : (r'Bir\s+', ),
# Afrikaans
'afr' : (r"'n\s+", r'Die\s+', ),
# Greek
'ell' : (r'O\s+', r'I\s+', r'To\s+', r'Ta\s+', r'Tus\s+', r'Tis\s+',
r"'Enas\s+", r"'Mia\s+", r"'Ena\s+", r"'Enan\s+", ),
# Hungarian
'hun' : (r'A\s+', r'Az\s+', r'Egy\s+',),
}
default_language_for_title_sort = None
title_sort_articles=r'^(A|The|An)\s+'
#: Specify a folder calibre should connect to at startup
# Specify a folder that calibre should connect to at startup using
# connect_to_folder. This must be a full path to the folder. If the folder does
# not exist when calibre starts, it is ignored.
# Example for Windows:
# auto_connect_to_folder = 'C:/Users/someone/Desktop/testlib'
# Example for other operating systems:
# auto_connect_to_folder = '/home/dropbox/My Dropbox/someone/library'
auto_connect_to_folder = ''
#: Specify renaming rules for SONY collections
# Specify renaming rules for SONY collections. This tweak is only applicable if
# metadata management is set to automatic. Collections on SONYs are named
# depending upon whether the field is standard or custom. A collection derived
# from a standard field is named for the value in that field.
#
# For example, if the standard 'series' column contains the value 'Darkover', then the
# collection name is 'Darkover'. A collection derived from a custom field will
# have the name of the field added to the value. For example, if a custom series
# column named 'My Series' contains the name 'Darkover', then the collection
# will by default be named 'Darkover (My Series)'. For purposes of this
# documentation, 'Darkover' is called the value and 'My Series' is called the
# category. If two books have fields that generate the same collection name,
# then both books will be in that collection.
#
# This set of tweaks lets you specify for a standard or custom field how
# the collections are to be named. You can use it to add a description to a
# standard field, for example 'Foo (Tag)' instead of the 'Foo'. You can also use
# it to force multiple fields to end up in the same collection.
#
# For example, you could force the values in 'series', '#my_series_1', and
# '#my_series_2' to appear in collections named 'some_value (Series)', thereby
# merging all of the fields into one set of collections.
#
# There are two related tweaks. The first determines the category name to use
# for a metadata field. The second is a template, used to determines how the
# value and category are combined to create the collection name.
# The syntax of the first tweak, sony_collection_renaming_rules, is:
# {'field_lookup_name':'category_name_to_use', 'lookup_name':'name', ...}
#
# The second tweak, sony_collection_name_template, is a template. It uses the
# same template language as plugboards and save templates. This tweak controls
# how the value and category are combined together to make the collection name.
# The only two fields available are {category} and {value}. The {value} field is
# never empty. The {category} field can be empty. The default is to put the
# value first, then the category enclosed in parentheses, if it isn't empty:
# '{value} {category:|(|)}'
#
# Examples: The first three examples assume that the second tweak
# has not been changed.
#
# 1) I want three series columns to be merged into one set of collections. The
# column lookup names are 'series', '#series_1' and '#series_2'. I want nothing
# in the parenthesis. The value to use in the tweak value would be:
# sony_collection_renaming_rules={'series':'', '#series_1':'', '#series_2':''}
#
# 2) I want the word '(Series)' to appear on collections made from series, and
# the word '(Tag)' to appear on collections made from tags. Use:
# sony_collection_renaming_rules={'series':'Series', 'tags':'Tag'}
#
# 3) I want 'series' and '#myseries' to be merged, and for the collection name
# to have '(Series)' appended. The renaming rule is:
# sony_collection_renaming_rules={'series':'Series', '#myseries':'Series'}
#
# 4) Same as example 2, but instead of having the category name in parentheses
# and appended to the value, I want it prepended and separated by a colon, such
# as in Series: Darkover. I must change the template used to format the category name
#
# The resulting two tweaks are:
# sony_collection_renaming_rules={'series':'Series', 'tags':'Tag'}
# sony_collection_name_template='{category:||: }{value}'
sony_collection_renaming_rules={}
sony_collection_name_template='{value}{category:| (|)}'
#: Specify how SONY collections are sorted
# Specify how SONY collections are sorted. This tweak is only applicable if
# metadata management is set to automatic. You can indicate which metadata is to
# be used to sort on a collection-by-collection basis. The format of the tweak
# is a list of metadata fields from which collections are made, followed by the
# name of the metadata field containing the sort value.
# Example: The following indicates that collections built from pubdate and tags
# are to be sorted by the value in the custom column '#mydate', that collections
# built from 'series' are to be sorted by 'series_index', and that all other
# collections are to be sorted by title. If a collection metadata field is not
# named, then if it is a series-based collection it is sorted by series order,
# otherwise it is sorted by title order.
# [(['pubdate', 'tags'],'#mydate'), (['series'],'series_index'), (['*'], 'title')]
# Note that the bracketing and parentheses are required. The syntax is
# [ ( [list of fields], sort field ) , ( [ list of fields ] , sort field ) ]
# Default: empty (no rules), so no collection attributes are named.
sony_collection_sorting_rules = []
#: Control how tags are applied when copying books to another library
# Set this to True to ensure that tags in 'Tags to add when adding
# a book' are added when copying books to another library
add_new_book_tags_when_importing_books = False
#: Set the maximum number of sort 'levels'
# Set the maximum number of sort 'levels' that calibre will use to resort the
# library after certain operations such as searches or device insertion. Each
# sort level adds a performance penalty. If the database is large (thousands of
# books) the penalty might be noticeable. If you are not concerned about multi-
# level sorts, and if you are seeing a slowdown, reduce the value of this tweak.
maximum_resort_levels = 5
#: Choose whether dates are sorted using visible fields
# Date values contain both a date and a time. When sorted, all the fields are
# used, regardless of what is displayed. Set this tweak to True to use only
# the fields that are being displayed.
sort_dates_using_visible_fields = False
#: Fuzz value for trimming covers
# The value used for the fuzz distance when trimming a cover.
# Colors within this distance are considered equal.
# The distance is in absolute intensity units.
cover_trim_fuzz_value = 10
#: Control behavior of the book list
# You can control the behavior of double clicks and pressing Enter on the books
# list. Choices: open_viewer, do_nothing, show_book_details,
# show_locked_book_details, edit_cell, edit_metadata. Selecting anything other
# than open_viewer, show_book_details, or show_locked_book_details has the side
# effect of disabling editing a field using a single click.
# Default: open_viewer.
# Example: doubleclick_on_library_view = 'do_nothing'
# You can also control whether the book list scrolls per item or
# per pixel. Default is per item.
doubleclick_on_library_view = 'open_viewer'
enter_key_behavior = 'do_nothing'
horizontal_scrolling_per_column = False
vertical_scrolling_per_row = False
#: Language to use when sorting
# Setting this tweak will force sorting to use the
# collating order for the specified language. This might be useful if you run
# calibre in English but want sorting to work in the language where you live.
# Set the tweak to the desired ISO 639-1 language code, in lower case.
# You can find the list of supported locales at
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# Default: locale_for_sorting = '' -- use the language calibre displays in
# Example: locale_for_sorting = 'fr' -- sort using French rules.
# Example: locale_for_sorting = 'nb' -- sort using Norwegian rules.
locale_for_sorting = ''
#: The number of seconds to wait before sending emails
# The number of seconds to wait before sending emails when using a
# public email server like GMX/Hotmail/Gmail. Default is: 5 minutes
# Setting it to lower may cause the server's SPAM controls to kick in,
# making email sending fail. Changes will take effect only after a restart of
# calibre. You can also change the list of hosts that calibre considers
# to be public relays here. Any relay host ending with one of the suffixes
# in the list below will be considered a public email server.
public_smtp_relay_delay = 301
public_smtp_relay_host_suffixes = ['gmail.com', 'live.com', 'gmx.com', 'outlook.com']
#: The maximum width and height for covers saved in the calibre library
# All covers in the calibre library will be resized, preserving aspect ratio,
# to fit within this size. This is to prevent slowdowns caused by extremely
# large covers
maximum_cover_size = (1650, 2200)
#: Where to send downloaded news
# When automatically sending downloaded news to a connected device, calibre
# will by default send it to the main memory. By changing this tweak, you can
# control where it is sent. Valid values are "main", "carda", "cardb". Note
# that if there isn't enough free space available on the location you choose,
# the files will be sent to the location with the most free space.
send_news_to_device_location = "main"
#: Unified toolbar on macOS
# If you enable this option and restart calibre, the toolbar will be 'unified'
# with the titlebar as is normal for macOS applications. However, doing this has
# various bugs, for instance the minimum width of the toolbar becomes twice
# what it should be and it causes other random bugs on some systems, so turn it
# on at your own risk!
unified_title_toolbar_on_osx = False
#: Save original file when converting/polishing from same format to same format
# When calibre does a conversion from the same format to the same format, for
# example, from EPUB to EPUB, the original file is saved, so that in case the
# conversion is poor, you can tweak the settings and run it again. By setting
# this to False you can prevent calibre from saving the original file.
# Similarly, by setting save_original_format_when_polishing to False you can
# prevent calibre from saving the original file when polishing.
save_original_format = True
save_original_format_when_polishing = True
#: Number of recently viewed books to show
# Right-clicking the "View" button shows a list of recently viewed books. Control
# how many should be shown, here.
gui_view_history_size = 15
#: Change the font size of the Book details panel in the interface
# Change the font size at which book details are rendered in the side panel and
# comments are rendered in the metadata edit dialog. Set it to a positive or
# negative number to increase or decrease the font size.
change_book_details_font_size_by = 0
#: What format to default to when using the "Unpack book" feature
# The "Unpack book" feature of calibre allows direct editing of a book format.
# If multiple formats are available, calibre will offer you a choice
# of formats, defaulting to your preferred output format if it is available.
# Set this tweak to a specific value of 'EPUB' or 'AZW3' to always default
# to that format rather than your output format preference.
# Set to a value of 'remember' to use whichever format you chose last time you
# used the "Unpack book" feature.
# Examples:
# default_tweak_format = None (Use output format)
# default_tweak_format = 'EPUB'
# default_tweak_format = 'remember'
default_tweak_format = None
#: Do not preselect a completion when editing authors/tags/series/etc.
# This means that you can make changes and press Enter and your changes will
# not be overwritten by a matching completion. However, if you wish to use the
# completions you will now have to press Tab to select one before pressing
# Enter. Which technique you prefer will depend on the state of metadata in
# your library and your personal editing style.
#
# If preselect_first_completion is False and you want Tab to accept what you
# typed instead of the first completion then set tab_accepts_uncompleted_text
# to True. If you do this then to select from the completions you must press
# the Down or Up arrow keys. The tweak tab_accepts_uncompleted_text is ignored
# if preselect_first_completion is True
preselect_first_completion = False
tab_accepts_uncompleted_text = False
#: Completion mode when editing authors/tags/series/etc.
# By default, when completing items, calibre will show you all the candidates
# that start with the text you have already typed. You can instead have it show
# all candidates that contain the text you have already typed. To do this, set
# completion_mode to 'contains'. For example, if you type asi it will match both
# Asimov and Quasimodo, whereas the default behavior would match only Asimov.
completion_mode = 'prefix'
#: Sort the list of libraries alphabetically
# The list of libraries in the Copy to library and Quick switch menus are
# normally sorted by most used. However, if there are more than a certain
# number of such libraries, the sorting becomes alphabetic. You can set that
# number here. The default is ten libraries.
many_libraries = 10
#: Choose available output formats for conversion
# Restrict the list of available output formats in the conversion dialogs.
# For example, if you only want to convert to EPUB and AZW3, change this to
# restrict_output_formats = ['EPUB', 'AZW3']. The default value of None causes
# all available output formats to be present.
restrict_output_formats = None
#: Set the thumbnail image quality used by the Content server
# The quality of a thumbnail is largely controlled by the compression quality
# used when creating it. Set this to a larger number to improve the quality.
# Note that the thumbnails get much larger with larger compression quality
# numbers.
# The value can be between 50 and 99
content_server_thumbnail_compression_quality = 75
#: Image file types to treat as e-books when dropping onto the "Book details" panel
# Normally, if you drop any image file in a format known to calibre onto the
# "Book details" panel, it will be used to set the cover. If you want to store
# some image types as e-books instead, you can set this tweak.
# Examples:
# cover_drop_exclude = {'tiff', 'webp'}
cover_drop_exclude = ()
#: Exclude fields when copy/pasting metadata
# You can ask calibre to not paste some metadata fields when using the
# Edit metadata->Copy metadata/Paste metadata actions. For example,
# exclude_fields_on_paste = ['cover', 'timestamp', '#mycolumn']
# to prevent pasting of the cover, Date and custom column, mycolumn.
# You can also add a shortcut in Preferences->Shortcuts->Edit metadata
# to paste metadata ignoring this tweak.
exclude_fields_on_paste = []
#: Skip internet connected check
# Skip checking whether the internet is available before downloading news.
# Useful if for some reason your operating systems network checking
# facilities are not reliable (for example NetworkManager on Linux).
skip_network_check = False
#: Tab stop width in the template editor
# Sets the width of the tab stop in the template editor in "average characters".
# For example, a value of 1 results in a space with the width of one average character.
template_editor_tab_stop_width = 4
#: Value for undefined numbers when sorting
# Sets the value to use for undefined numbers when sorting.
# For example, the value -10 sorts undefined numbers as if they were set to -10.
# Use 'maximum' for the largest possible number. Use 'minimum' for the smallest
# possible number. Quotes are optional if entering a number.
# Examples:
# value_for_undefined_numbers_when_sorting = -100
# value_for_undefined_numbers_when_sorting = '2'
# value_for_undefined_numbers_when_sorting = -0.01
# value_for_undefined_numbers_when_sorting = 'minimum'
# value_for_undefined_numbers_when_sorting = 'maximum'
value_for_undefined_numbers_when_sorting = 0
#: Allow template database functions in composite columns
# If True then the template database functions book_values() and book_count()
# can be used in composite custom columns. Note: setting this tweak to True and
# using these functions in composites can be very slow.
# Default: False
allow_template_database_functions_in_composites = False
#: Change the programs that are run when opening files/URLs
# By default, calibre passes URLs to the operating system to open using
# whatever default programs are configured there. Here you can override
# that by specifying the program to use, per URL type. For local files,
# the type is "file" and for web links it is "http*". For example:
# openers_by_scheme = { "http*": "firefox %u" } will make calibre run Firefox
# for https://whatever URLs. %u is replaced by the URL to be opened. The scheme
# takes a glob pattern allowing a single entry to match multiple URL types.
openers_by_scheme = {}
#: Set the first day of the week for calendar popups
# It must be one of the values Default, Sunday, Monday, Tuesday, Wednesday,
# Thursday, Friday, or Saturday, all in English, spelled exactly as shown.
calendar_start_day_of_week = 'Default'
| 31,485 | Python | .py | 538 | 56.505576 | 136 | 0.737375 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,859 | generate.py | kovidgoyal_calibre/imgsrc/generate.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import argparse
import glob
import hashlib
import json
import os
import subprocess
duplicates = {
'character-set': ['languages'],
'calibre': ['library', 'lt'],
'format-text-color': ['lookfeel'],
'books_in_series': ['series'],
'plugins.svg': ['plugins/plugin_upgrade_ok'],
}
sizes = {
'lt': '256',
'library': '1024',
'default_cover': 'original',
'viewer': '256',
'tweak': '256',
}
skip = {'calibre'}
j = os.path.join
base = os.path.dirname(os.path.abspath(__file__))
output_base = j(os.path.dirname(base), 'resources', 'images')
hash_path = j(os.path.dirname(base), '.build-cache', 'imgsrc-gen.json')
if os.path.exists(hash_path):
with open(hash_path, 'rb') as f:
hashes = json.load(f)
else:
hashes = {}
src_hashes = {}
def iterfiles(only=()):
    """Yield (svg_source_path, [png_output_paths]) for every icon that needs
    (re-)rendering.

    When *only* is non-empty, restrict to those icon names and re-render them
    unconditionally; otherwise skip sources whose SHA1 matches the cached hash.
    Side effect: records each source's SHA1 in the module-level ``src_hashes``.
    """
    for src in glob.glob(j(base, '*.svg')) + glob.glob(j(base, 'plugins/*.svg')):
        name = os.path.relpath(src, base).rpartition('.')[0]
        if only and name not in only:
            continue
        # Use a context manager so the file handle is closed promptly instead
        # of leaking until garbage collection.
        with open(src, 'rb') as f:
            src_hashes[name] = h = hashlib.sha1(f.read()).hexdigest()
        if not only and h == hashes.get(name):
            continue  # unchanged since last run
        output_names = [n for n in [name] + duplicates.get(name, []) if n not in skip]
        output_files = [j(output_base, n) + '.png' for n in output_names]
        if output_files:
            yield src, output_files
def rsvg(src, size, dest):
    """Rasterize the SVG file *src* to the PNG file *dest* using
    ``rsvg-convert``, then losslessly recompress the result with ``optipng``.

    ``size`` is a pixel dimension (as a string) applied to both width and
    height, or ``'original'`` to keep the SVG's native dimensions.
    """
    args = ['rsvg-convert', '-d', '96', '-p', '96']
    if size != 'original':
        args.extend(('--width', size, '--height', size))
    args.extend(('-o', dest, src))
    subprocess.check_call(args)
    subprocess.check_call(['optipng', '-o7', '-quiet', '-strip', 'all', dest])
def render(src, output_files):
    """Render one SVG source to all of its PNG outputs, then record its hash.

    Each output's size comes from the module-level ``sizes`` table (keyed by
    output basename, default '128').  On success the source's SHA1 (computed
    earlier by iterfiles into ``src_hashes``) is copied into ``hashes`` so the
    next run can skip it.
    """
    for dest in output_files:
        oname = os.path.basename(dest).rpartition('.')[0]
        size = sizes.get(oname, '128')
        print('Rendering', oname, 'at size:', size)
        rsvg(src, size, dest)
    # Only mark the source as up to date after every output rendered cleanly.
    name = os.path.relpath(src, base).rpartition('.')[0]
    hashes[name] = src_hashes[name]
def main():
    """Render all changed (or explicitly requested) SVG icons to PNG and
    persist the updated source-hash cache."""
    p = argparse.ArgumentParser()
    p.add_argument('only', nargs='*', default=[], help='Only render the specified icons')
    args = p.parse_args()
    for src, ofiles in iterfiles(args.only):
        render(src, ofiles)
    # The cache lives in .build-cache/, which may not exist on a fresh
    # checkout; create it so the open() below cannot fail.
    os.makedirs(os.path.dirname(hash_path), exist_ok=True)
    with open(hash_path, 'w') as f:
        json.dump(hashes, f, indent=2, sort_keys=True)
if __name__ == '__main__':
main()
| 2,496 | Python | .py | 70 | 30.614286 | 89 | 0.607217 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,860 | copy-to-library.svg | kovidgoyal_calibre/imgsrc/copy-to-library.svg | <svg xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" id="svg2" width="128" height="128" version="1.1" viewBox="0 0 128 128"><defs id="defs4"><marker id="marker5884" orient="auto" refX="0" refY="0" style="overflow:visible"><path id="path5886" d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z" transform="matrix(-0.8,0,0,-0.8,-10,0)" style="fill:#8eea00;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1pt;stroke-opacity:1"/></marker></defs><metadata id="metadata7"/><image id="image7" width="100%" height="100%" xlink:href="calibre.svg"/><g id="layer1" transform="translate(0,-924.36216)"><path id="path4" d="m 127.35998,951.6386 q 0,1.2023 -0.87854,2.0808 l -23.67428,23.6743 q -0.87854,0.8785 -2.08075,0.8785 -1.202207,0 -2.080745,-0.8785 -0.878538,-0.8786 -0.878538,-2.0808 l 0,-11.8371 -10.357497,0 q -4.531401,0 -8.114911,0.2774 -3.58351,0.2774 -7.120781,0.9941 -3.537271,0.7168 -6.149764,1.9652 -2.612494,1.2485 -4.878195,3.2136 -2.265706,1.9651 -3.699107,4.6701 -1.433402,2.705 -2.242581,6.4041 -0.80918,3.6991 -0.80918,8.3692 0,2.5431 0.231191,5.6874 0,0.2774 0.115597,1.0866 0.115598,0.8092 0.115598,1.2253 0,0.6936 -0.393032,1.156 -0.393027,0.4624 -1.086609,0.4624 -0.73982,0 -1.29469,-0.7861 -0.323668,-0.4161 -0.601103,-1.0172 -0.277434,-0.6011 -0.624222,-1.3872 -0.346794,-0.786 -0.485511,-1.1097 Q 44.5,981.5089 44.5,973.8333 q 0,-9.2015 2.450657,-15.3976 7.490688,-18.6342 40.458973,-18.6342 l 10.357497,0 0,-11.8372 q 0,-1.2021 0.878538,-2.0807 0.878538,-0.8785 2.080745,-0.8785 1.20221,0 2.08075,0.8785 l 23.67428,23.6743 q 0.87854,0.8786 0.87854,2.0807 z" style="fill:#58ab21;fill-opacity:.75720167"/></g></svg> | 1,690 | Python | .py | 1 | 1,690 | 1,690 | 0.689349 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,861 | render-logo.py | kovidgoyal_calibre/imgsrc/render-logo.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import os
import shutil
import subprocess
import sys
j = os.path.join
base = os.path.dirname(os.path.abspath(__file__))
resources = j(os.path.dirname(base), 'resources')
icons = j(os.path.dirname(base), 'icons')
srv = j(os.path.dirname(os.path.dirname(base)), 'srv')
def render(outpath, sz, background=None):
    """Render the master calibre.svg logo as an *sz* x *sz* PNG at *outpath*,
    optionally compositing onto *background* (an rsvg-convert color spec),
    then optimize the PNG with optipng."""
    size = str(sz)
    cmd = ['rsvg-convert', j(base, 'calibre.svg')]
    if background:
        cmd += ['-b', background]
    cmd += ['-w', size, '-h', size, '-d', '96', '-p', '96', '-o', outpath]
    subprocess.check_call(cmd)
    subprocess.check_call(['optipng', '-o7', '-strip', 'all', outpath])
render(j(resources, 'images', 'library.png'), 1024)
render(j(resources, 'images', 'lt.png'), 256)
render(j(resources, 'images', 'apple-touch-icon.png'), 256, 'white')
render(j(resources, 'content-server', 'calibre.png'), 128)
render(j(srv, 'main', 'calibre-paypal-logo.png'), 60)
shutil.copy2(j(resources, 'content-server', 'calibre.png'), j(resources, 'content_server', 'calibre.png'))
# Copy the logo as the web favicon (was duplicated twice; once is enough).
shutil.copy2(j(resources, 'images', 'lt.png'), j(srv, 'common', 'favicon.png'))
subprocess.check_call([sys.executable, j(icons, 'make_ico_files.py'), 'only-logo'])
shutil.copy2(j(icons, 'library.ico'), j(srv, 'common', 'favicon.ico'))
shutil.copy2(j(icons, 'library.ico'), j(srv, 'main/static/resources/img', 'favicon.ico'))
shutil.copy2(j(icons, 'library.ico'), j(srv, 'open-books/drmfree/static/img', 'favicon.ico'))
subprocess.check_call([sys.executable, j(icons, 'icns', 'make_iconsets.py'), 'only-logo'])
os.chdir(srv)
subprocess.check_call(['git', 'commit', '-am', 'Update calibre favicons'])
for s in 'main code open-books dl1'.split():
subprocess.check_call(['./publish', s, 'update'])
| 1,906 | Python | .py | 36 | 50.805556 | 109 | 0.684041 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,862 | edit-copy.svg | kovidgoyal_calibre/imgsrc/edit-copy.svg | <svg xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" id="svg2" width="128" height="128" version="1.1" viewBox="0 0 128 128"><defs id="defs4"><linearGradient id="linearGradient4139"><stop style="stop-color:#0c7e1b;stop-opacity:1" id="stop4141" offset="0"/><stop style="stop-color:#30b241;stop-opacity:1" id="stop4143" offset="1"/></linearGradient><radialGradient id="radialGradient4145" cx="64" cy="988.362" r="63" fx="64" fy="988.362" gradientTransform="translate(0,-3.1376577e-4)" gradientUnits="userSpaceOnUse" xlink:href="#linearGradient4139"/></defs><metadata id="metadata7"/><g id="layer1" transform="translate(0,-924.36216)"><path id="path4" d="m 118,1042.3622 0,-81.00004 -27,0 0,29.25 q 0,2.8125 -1.96875,4.78125 -1.96875,1.96875 -4.78125,1.96875 l -29.25,0 0,45.00004 63,0 z M 50.78125,985.83091 73,963.61217 l 0,-29.25001 -27,0 0,29.25001 q 0,2.81249 -1.96875,4.78124 -1.96875,1.96875 -4.78125,1.96875 l -29.25,0 0,45.00004 36,0 0,-18.00004 q 0,-2.8125 1.40625,-6.1875 1.40625,-3.375 3.375,-5.34375 z M 37,940.33872 l -21.02344,21.02344 21.02344,0 0,-21.02344 z m 45,27 -21.02343,21.02344 21.02343,0 0,-21.02344 z m 38.25,-14.97656 q 2.8125,0 4.78125,1.96875 Q 127,956.29966 127,959.11216 l 0,85.50004 q 0,2.8125 -1.96875,4.7812 -1.96875,1.9688 -4.78125,1.9688 l -67.5,0 q -2.8125,0 -4.78125,-1.9688 Q 46,1047.4247 46,1044.6122 l 0,-20.25 -38.25,0 q -2.8125,0 -4.78125,-1.9688 Q 1,1020.4247 1,1017.6122 l 0,-47.25004 q 0,-2.81249 1.40625,-6.1875 1.40625,-3.375 3.375,-5.34375 l 28.6875,-28.6875 q 1.96875,-1.96875 5.34375,-3.375 3.375,-1.40625 6.1875,-1.40625 l 29.25,0 q 2.8125,0 4.78125,1.96875 Q 82,929.29966 82,932.11217 l 0,23.06249 q 4.78125,-2.8125 9,-2.8125 l 29.25,0 z" style="fill:url(#radialGradient4145);fill-opacity:1"/><path style="fill:#2149c1;fill-opacity:1" id="path4195" d="m 37,940.33872 -21.02344,21.02344 21.02344,0 z m 36,23.27345 
0,-29.25001 -27,0 c -0.02042,9.97976 0.04091,19.96133 -0.03076,29.93995 -0.250252,4.07148 -4.435187,6.63772 -8.254207,6.06005 l -27.715031,0 0,45.00004 36,0 0,9 c -12.979754,-0.02 -25.96133,0.041 -38.9399414,-0.031 -4.0714417,-0.2501 -6.63773528,-4.4349 -6.0600586,-8.254 0.014586,-15.4755 -0.0292134,-30.95197 0.021973,-46.42695 0.3402367,-5.4141 3.5223417,-10.04233 7.4727445,-13.53281 9.0340545,-8.95433 17.9139465,-18.07489 27.0462985,-26.92325 4.100446,-3.32858 9.440696,-4.12321 14.552234,-3.83203 8.834578,0.0786 17.687411,-0.15949 26.510266,0.12305 3.91375,0.64488 5.909632,4.76166 5.396484,8.41776 l 0,21.27169 c 0,0 -16.708261,15.5236 -9.000002,8.43751 z"/></g></svg> | 2,609 | Python | .py | 1 | 2,609 | 2,609 | 0.701418 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,863 | search_copy_saved.svg | kovidgoyal_calibre/imgsrc/search_copy_saved.svg | <svg xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" id="svg2" width="128" height="128" version="1.1" viewBox="0 0 128 128"><defs id="defs4"><linearGradient id="linearGradient4136"><stop style="stop-color:#0b59bd;stop-opacity:1" id="stop4138" offset="0"/><stop style="stop-color:#b2cffa;stop-opacity:1" id="stop4140" offset="1"/></linearGradient><linearGradient id="linearGradient4142" x1="126.5" x2=".929" y1="1051.166" y2="925.594" gradientUnits="userSpaceOnUse" xlink:href="#linearGradient4136"/><radialGradient id="radialGradient4145" cx="64" cy="988.362" r="63" fx="64" fy="988.362" gradientTransform="matrix(0.45956341,0,0,0.45956341,13.055023,512.49001)" gradientUnits="userSpaceOnUse" xlink:href="#linearGradient4139"/><linearGradient id="linearGradient4139"><stop style="stop-color:#0c7e1b;stop-opacity:1" id="stop4141" offset="0"/><stop style="stop-color:#30b241;stop-opacity:1" id="stop4143" offset="1"/></linearGradient></defs><metadata id="metadata7"/><g id="layer1" transform="translate(0,-924.36216)"><path id="path6" d="M 122.3068,1022.695 84.283489,991.81358 c -1.3702,-1.02937 -2.05531,-3.08813 -1.02765,-4.80378 7.53615,-16.12698 4.79573,-35.68523 -8.56381,-49.06719 -17.81272,-17.8426 -47.95733,-16.81324 -64.399844,3.77442 -12.3318854,15.44072 -12.3318854,37.74396 -0.3425504,53.52782 13.0169874,16.47005 34.9403344,20.58765 52.4105044,12.35255 1.71277,-0.6862 3.42553,-0.3431 4.79574,1.0294 l 30.82971,38.0871 c 4.795731,5.8331 13.359541,6.1763 18.497831,1.0294 l 7.1936,-7.2057 c 4.79573,-4.4606 4.45317,-13.0388 -1.37022,-17.8426 z M 67.155879,992.15673 c -12.67444,12.69567 -33.570133,12.69567 -46.587123,0 -12.6744374,-12.69574 -12.6744374,-33.62647 0,-46.66532 12.674438,-13.03881 33.570133,-12.69569 46.587123,0 13.01699,13.03885 13.01699,33.96958 0,46.66532 z" style="fill:url(#linearGradient4142);fill-opacity:1"/><g 
id="g4154" transform="translate(-2.1428571,-0.80357143)"><path style="fill:url(#radialGradient4145);fill-opacity:1" id="path4" d="m 67.283504,991.52167 0,-37.22465 -12.40821,0 0,13.44223 q 0,1.29253 -0.904766,2.19729 -0.904765,0.90477 -2.197287,0.90477 l -13.44223,0 0,20.68036 28.952493,0 z m -30.891276,-25.9797 10.210924,-10.21092 0,-13.44224 -12.408212,0 0,13.44224 q 0,1.29251 -0.904765,2.19728 -0.904766,0.90476 -2.197288,0.90476 l -13.44223,0 0,20.68037 16.544283,0 0,-8.27215 q 0,-1.29253 0.646261,-2.84355 0.646261,-1.55103 1.551027,-2.45579 z m -6.333359,-20.90655 -9.661603,9.6616 9.661603,0 0,-9.6616 z m 20.680354,12.40821 -9.661599,9.66161 9.661599,0 0,-9.66161 z m 17.578299,-6.88268 q 1.292522,0 2.197288,0.90477 0.904765,0.90476 0.904765,2.19729 l 0,39.29268 q 0,1.29252 -0.904765,2.19726 -0.904766,0.90479 -2.197288,0.90479 l -31.020529,0 q -1.292522,0 -2.197287,-0.90479 -0.904766,-0.90474 -0.904766,-2.19726 l 0,-9.30616 -17.5783,0 q -1.292522,0 -2.197288,-0.90479 -0.904765,-0.90474 -0.904765,-2.19727 l 0,-21.71438 q 0,-1.29251 0.646261,-2.84354 0.646261,-1.55103 1.551026,-2.4558 L 28.8956,939.95003 q 0.904765,-0.90477 2.455792,-1.55103 1.551026,-0.64626 2.843548,-0.64626 l 13.44223,0 q 1.292522,0 2.197288,0.90477 0.904765,0.90476 0.904765,2.19729 l 0,10.59868 q 2.197288,-1.29253 4.136071,-1.29253 l 13.442228,0 z"/><path id="path4195" d="m 30.058869,944.63542 -9.661603,9.6616 9.661603,0 z m 16.544283,10.69563 0,-13.44224 -12.408212,0 c -0.0094,4.58633 0.0188,9.1735 -0.01414,13.75931 -0.115007,1.8711 -2.03825,3.05045 -3.793332,2.78497 l -12.736814,0 0,20.68037 16.544283,0 0,4.13607 c -5.96502,-0.009 -11.930877,0.0188 -17.895372,-0.0142 -1.871086,-0.11494 -3.050461,-2.03812 -2.784981,-3.79323 0.0067,-7.11197 -0.01342,-14.22439 0.0101,-21.33612 0.15636,-2.48813 1.618739,-4.61509 3.4342,-6.21919 4.151721,-4.11508 8.232595,-8.30656 12.429489,-12.37294 1.884415,-1.52969 4.338599,-1.89487 6.687675,-1.76106 4.060048,0.0361 8.128487,-0.0733 12.183148,0.0566 
1.798616,0.29636 2.715851,2.18829 2.480027,3.86849 l 0,9.7757 c 0,0 -7.678506,7.13407 -4.136072,3.87757 z" style="fill:#2149c1;fill-opacity:1"/></g></g></svg> | 4,063 | Python | .py | 1 | 4,063 | 4,063 | 0.717696 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,864 | copy.svg | kovidgoyal_calibre/imgsrc/srv/copy.svg | <svg width="1792" height="1792" viewBox="0 0 1792 1792" xmlns="http://www.w3.org/2000/svg"><path d="M1696 384q40 0 68 28t28 68v1216q0 40-28 68t-68 28h-960q-40 0-68-28t-28-68v-288h-544q-40 0-68-28t-28-68v-672q0-40 20-88t48-76l408-408q28-28 76-48t88-20h416q40 0 68 28t28 68v328q68-40 128-40h416zm-544 213l-299 299h299v-299zm-640-384l-299 299h299v-299zm196 647l316-316v-416h-384v416q0 40-28 68t-68 28h-416v640h512v-256q0-40 20-88t48-76zm956 804v-1152h-384v416q0 40-28 68t-68 28h-416v640h896z"/></svg> | 497 | Python | .py | 1 | 497 | 497 | 0.770624 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,865 | generate.py | kovidgoyal_calibre/imgsrc/srv/generate.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import os
import re
import sys
from lxml import etree
SVG_NS = 'http://www.w3.org/2000/svg'
XLINK_NS = 'http://www.w3.org/1999/xlink'
def clone_node(node, parent):
    """Recursively deep-copy the lxml element *node* as a new child of
    *parent*, preserving tag, attributes, text and tail.  Returns the copy."""
    dup = parent.makeelement(node.tag)
    for attr in node.keys():
        dup.set(attr, node.get(attr))
    dup.text = node.text
    dup.tail = node.tail
    for kid in node.iterchildren('*'):
        clone_node(kid, dup)
    parent.append(dup)
    return dup
def merge():
    """Concatenate every ``*.svg`` next to this script into one SVG sprite.

    Each source file becomes a ``<symbol>`` with id ``icon-<basename>`` so
    pages can reference individual icons via ``<use xlink:href="#icon-...">``.
    Returns the sprite as a unicode string whose root ``<svg>`` element is
    hidden with ``display:none``.
    """
    base = os.path.dirname(os.path.abspath(__file__))
    # no_network/resolve_entities=False hardens the parser against external
    # entity fetches; recover=True tolerates slightly malformed SVG.
    ans = etree.fromstring(
        '<svg xmlns="%s" xmlns:xlink="%s"/>' % (SVG_NS, XLINK_NS),
        parser=etree.XMLParser(
            recover=True, no_network=True, resolve_entities=False
        )
    )
    for f in os.listdir(base):
        if not f.endswith('.svg'):
            continue
        with open(os.path.join(base, f), 'rb') as ff:
            raw = ff.read()
        svg = etree.fromstring(
            raw,
            parser=etree.XMLParser(
                recover=True, no_network=True, resolve_entities=False
            )
        )
        symbol = ans.makeelement('{%s}symbol' % SVG_NS)
        symbol.set('viewBox', svg.get('viewBox'))
        symbol.set('id', 'icon-' + f.rpartition('.')[0])
        for child in svg.iterchildren('*'):
            clone_node(child, symbol)
        ans.append(symbol)
    ans = etree.tostring(ans, encoding='unicode', pretty_print=True, with_tail=False)
    # Hide the sprite container itself; only <use> references should render.
    ans = re.sub('<svg[^>]+>', '<svg style="display:none">', ans, count=1)
    return ans
if __name__ == '__main__':
    # merge() returns a str; sys.stdout is a *text* stream in Python 3, so
    # writing encoded bytes to it raises TypeError.  Write the bytes to the
    # underlying binary buffer to keep the explicit UTF-8 encoding.
    sys.stdout.buffer.write(merge().encode('utf-8'))
| 1,691 | Python | .py | 48 | 28.4375 | 85 | 0.600122 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,866 | the_daily_news_egypt.recipe | kovidgoyal_calibre/recipes/the_daily_news_egypt.recipe | __license__ = 'GPL v3'
__copyright__ = '2011, Pat Stapleton <pat.stapleton at gmail.com>'
'''
thedailynewsegypt.com
'''
import re
from calibre.web.feeds.recipes import BasicNewsRecipe
class TheDailyNewsEG(BasicNewsRecipe):
    """Calibre news recipe: fetch The Daily News Egypt via its RSS feed."""
    title = u'The Daily News Egypt'
    __author__ = 'Omm Mishmishah'
    description = 'News from Egypt'
    masthead_url = 'http://www.thedailynewsegypt.com/images/DailyNews-03_05.gif'
    cover_url = 'http://www.thedailynewsegypt.com/images/DailyNews-03_05.gif'
    # Rely on calibre's heuristic cleanup to extract article bodies.
    auto_cleanup = True
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = False
    use_embedded_content = False
    encoding = 'utf8'
    publisher = 'The Daily News Egypt'
    category = 'News, Egypt, World'
    language = 'en_EG'
    publication_type = 'newsportal'
    # preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]
    # Remove annoying map links (inline-caption class is also used for some
    # image captions! hence regex to match maps.google)
    preprocess_regexps = [(re.compile(
        r'<a class="inline-caption" href="http://maps\.google\.com.*?/a>', re.DOTALL), lambda m: '')]
    conversion_options = {
        'comments': description, 'tags': category, 'language': language, 'publisher': publisher, 'linearize_tables': False
    }
    keep_only_tags = [dict(attrs={'class': ['article section']})]
    remove_tags = [dict(attrs={'class': ['related', 'tags', 'tools', 'attached-content ready',
                                         'inline-content story left', 'inline-content map left contracted', 'published',
                                         'story-map', 'statepromo', 'topics', ]})]
    remove_attributes = ['width', 'height']
    feeds = [(u'The Daily News Egypt',
              u'http://www.thedailynewsegypt.com/rss.php?sectionid=all')]
| 1,813 | Python | .gyp | 38 | 41.236842 | 122 | 0.642898 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,867 | ancient_egypt.recipe | kovidgoyal_calibre/recipes/ancient_egypt.recipe | #!/usr/bin/env python
# vim:fileencoding=utf-8
'''
https://ancientegyptmagazine.com
'''
from calibre import browser
from calibre.web.feeds.news import BasicNewsRecipe
class ancientegypt(BasicNewsRecipe):
    """Calibre recipe for The Past's Ancient Egypt magazine (the-past.com)."""
    title = 'The Past: Ancient Egypt Magazine'
    language = 'en'
    __author__ = 'unkn0wn'
    description = (
        'Ancient Egypt is the world\'s leading Egyptology magazine, exploring the history, people and culture of the Nile Valley. '
        'Now in a larger format with a fresh new design, AE brings you the latest news and discoveries, and feature articles covering '
        'more than 5000 years of Egyptian history. Published bimonthly.'
    )
    no_stylesheets = True
    use_embedded_content = False
    remove_attributes = ['style', 'height', 'width']
    ignore_duplicate_articles = {'url'}
    resolve_internal_links = True
    masthead_url = 'https://ancientegyptmagazine.com/media/website/ae-logo-2.png'
    simultaneous_downloads = 1
    extra_css = '''
        [class^="meta"] { font-size:small; }
        .post-subtitle { font-style: italic; color:#202020; }
        .wp-block-image { font-size:small; text-align:center; }
    '''
    # Keep only the article header/hero/body containers.
    keep_only_tags = [
        dict(attrs={'class':lambda x: x and '__header' in x}),
        dict(attrs={'class':lambda x: x and '__background' in x}),
        dict(attrs={'class':lambda x: x and '__body_area' in x}),
    ]
    remove_tags = [
        dict(attrs={'class':'ad-break'}),
        dict(attrs={'class':lambda x: x and 'avatar' in x.split()}),
        dict(attrs={'class':lambda x: x and '--share' in x})
    ]
    def preprocess_html(self, soup):
        # Demote the subtitle element to a plain <p> so it renders as text.
        exp = soup.find(attrs={'class':lambda x: x and 'post-subtitle' in x.split()})
        if exp:
            exp.name = 'p'
        return soup
    recipe_specific_options = {
        'issue': {
            'short': 'Enter the Issue Number you want to download ',
            'long': 'For example, 136'
        }
    }
    def parse_index(self):
        # Find the newest issue on the magazine's category page, unless the
        # user asked for a specific issue number via recipe options.
        soup = self.index_to_soup('https://the-past.com/category/magazines/ae/')
        art = soup.find('article', attrs={'class':lambda x: x and 'tag-magazines' in x.split()})
        url = art.h2.a['href']
        d = self.recipe_specific_options.get('issue')
        if d and isinstance(d, str):
            url = 'https://the-past.com/magazines/ae/ancient-egypt-magazine-' + d + '/'
        issue = self.index_to_soup(url)
        # Use the issue's own title/date/cover/editorial for book metadata.
        ti = issue.find('h1', attrs={'class':lambda x: x and 'post-title' in x.split()})
        if ti:
            self.title = self.tag_to_string(ti).strip()
        dt = soup.find(attrs={'class':lambda x: x and '__date' in x})
        if dt:
            self.timefmt = ' [' + self.tag_to_string(dt).strip() + ']'
        edit = issue.find('h2', attrs={'id':'from-the-editor'})
        if edit and edit.findParent('div'):
            self.description = self.tag_to_string(edit.findParent('div'))
        cov = issue.find('figure', attrs={'class':lambda x: x and 'wp-block-image' in x.split()})
        if cov:
            self.cover_url = cov.img['src']
        div = issue.find('div', attrs={'class':lambda x: x and 'entry-content' in x.split()})
        feeds = []
        # Section headings (h2) are paired positionally with article lists.
        h2 = div.findAll('h2', attrs={'class':lambda x: x and 'wp-block-heading' in x.split()})
        lt = div.findAll(attrs={'class':'display-posts-listing'})
        for x, y in zip(h2, lt):
            section = self.tag_to_string(x).strip()
            self.log(section)
            articles = []
            for a in y.findAll('a', href=True, attrs={'class':'title'}):
                url = a['href']
                title = self.tag_to_string(a).strip()
                desc = ''
                exp = a.findNext(attrs={'class':'excerpt'})
                if exp:
                    desc = self.tag_to_string(exp).strip()
                self.log('\t', title, '\n\t', desc, '\n\t\t', url)
                articles.append({'title': title, 'description':desc, 'url': url})
            if articles:
                feeds.append((section, articles))
        return feeds
    # The three overrides below bypass calibre's default browser and fetch
    # with a plain mechanize browser instead (see module-level `browser`).
    def get_browser(self, *args, **kwargs):
        return self
    def clone_browser(self, *args, **kwargs):
        return self.get_browser()
    def open_novisit(self, *args, **kwargs):
        br = browser()
        return br.open_novisit(*args, **kwargs)
    open = open_novisit
| 4,347 | Python | .gyp | 97 | 36.061856 | 135 | 0.582428 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,868 | run-python.h | kovidgoyal_calibre/bypy/run-python.h | /*
* Copyright (C) 2019 Kovid Goyal <kovid at kovidgoyal.net>
*
* Distributed under terms of the GPL3 license.
*/
#pragma once
#define PY_SSIZE_T_CLEAN
#include <stdio.h>
#include <stdbool.h>
#include <time.h>
#include <stdlib.h>
#include <stdarg.h>
#ifdef _WIN32
#include <string.h>
#define PATH_MAX MAX_PATH
#else
#include <strings.h>
#endif
#include <errno.h>
#include <Python.h>
#ifdef __APPLE__
#include <os/log.h>
#endif
#include <bypy-freeze.h>
/* Thin wrapper over bypy_pre_initialize_interpreter(); runs the pre-init
 * phase of the frozen Python runtime before full initialization. */
static void
pre_initialize_interpreter(bool is_gui_app) {
    bypy_pre_initialize_interpreter(is_gui_app);
}
/* Decode the locale-encoded C string `src` into the fixed-size wchar_t
 * array `dest` (truncating if needed, always NUL-terminating).  Calls
 * fatal() if decoding fails.  `dest` must be a real array so sizeof works. */
#define decode_char_buf(src, dest) { \
    size_t tsz; \
    wchar_t* t__ = Py_DecodeLocale(src, &tsz); \
    if (!t__) fatal("Failed to decode path: %s", src); \
    if (tsz > sizeof(dest) - 1) tsz = sizeof(dest) - 1; \
    memcpy(dest, t__, tsz * sizeof(wchar_t)); \
    dest[tsz] = 0; \
    PyMem_RawFree(t__); \
}
#define MAX_SYS_PATHS 3
/* Everything needed to boot the frozen interpreter: install-layout paths
 * discovered at startup, the entry-point module/function names, and the
 * original argv.  Filled in by platform-specific launcher code and consumed
 * by run_interpreter(). */
typedef struct {
    int argc;
    wchar_t exe_path[PATH_MAX], python_home_path[PATH_MAX], python_lib_path[PATH_MAX];
    wchar_t extensions_path[PATH_MAX], resources_path[PATH_MAX], executables_path[PATH_MAX];
#ifdef __APPLE__
    wchar_t bundle_resource_path[PATH_MAX], frameworks_path[PATH_MAX];
#elif defined(_WIN32)
    wchar_t app_dir[PATH_MAX];
#endif
    const wchar_t *basename, *module, *function;
#ifdef _WIN32
    /* Windows supplies a native wide-character argv. */
    wchar_t* const *argv;
#else
    char* const *argv;
#endif
} InterpreterData;
/* Single global instance, zero-initialized; populated before run_interpreter(). */
static InterpreterData interpreter_data = {0};
/* Initialize the embedded Python interpreter from interpreter_data, publish
 * the collected paths/names as attributes on Python's `sys` module (these are
 * read by calibre's startup code), run the interpreter, and exit the process
 * with its return code.  Never returns. */
static void
run_interpreter() {
    bypy_initialize_interpreter(
        interpreter_data.exe_path, interpreter_data.python_home_path, L"site", interpreter_data.extensions_path,
        interpreter_data.argc, interpreter_data.argv);
    /* NOTE(review): use_os_log is defined outside this chunk; it appears to
     * double as the "is a GUI app" flag here -- confirm at its definition. */
    set_sys_bool("gui_app", use_os_log);
    set_sys_bool("frozen", true);
    set_sys_string("calibre_basename",   interpreter_data.basename);
    set_sys_string("calibre_module",    interpreter_data.module);
    set_sys_string("calibre_function",  interpreter_data.function);
    set_sys_string("extensions_location", interpreter_data.extensions_path);
    set_sys_string("resources_location", interpreter_data.resources_path);
    set_sys_string("executables_location", interpreter_data.executables_path);
#ifdef __APPLE__
    set_sys_string("resourcepath", interpreter_data.bundle_resource_path);
    set_sys_string("frameworks_dir", interpreter_data.frameworks_path);
    set_sys_bool("new_app_bundle", true);
#elif defined(_WIN32)
    set_sys_string("app_dir", interpreter_data.app_dir);
    set_sys_bool("new_app_layout", true);
#else
    set_sys_string("frozen_path", interpreter_data.executables_path);
#endif
    int ret = bypy_run_interpreter();
    exit(ret);
}
| 2,641 | Python | .pyt | 81 | 29.962963 | 116 | 0.717366 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,869 | ipython.py | kovidgoyal_calibre/src/calibre/utils/ipython.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import re
import sys
from calibre.constants import cache_dir, get_version, iswindows
from polyglot.builtins import exec_path
ipydir = os.path.join(cache_dir(), 'ipython')
BANNER = ('Welcome to the interactive calibre shell!\n')
def setup_pyreadline():
    """Configure pyreadline (Windows) with calibre's key bindings and history.

    Writes the configuration below to ``<cache>/ipython/pyreadline.txt`` and
    points pyreadline at it, then installs an rlcompleter-based tab completer
    (with the automatic '(' suffix for callables disabled) and persistent
    history.  Prints a notice and does nothing if pyreadline is unavailable.
    """
    # Runtime configuration consumed by pyreadline; %r below is filled with
    # the history-file path.  Do not reformat: this is a literal config file.
    config = '''
#Bind keys for exit (keys only work on empty lines
#disable_readline(True) #Disable pyreadline completely.
debug_output("off") #"on" saves log info to./pyreadline_debug_log.txt
#"on_nologfile" only enables print warning messages
bind_exit_key("Control-d")
bind_exit_key("Control-z")
#Commands for moving
bind_key("Home", "beginning_of_line")
bind_key("End", "end_of_line")
bind_key("Left", "backward_char")
bind_key("Control-b", "backward_char")
bind_key("Right", "forward_char")
bind_key("Control-f", "forward_char")
bind_key("Alt-f", "forward_word")
bind_key("Alt-b", "backward_word")
bind_key("Clear", "clear_screen")
bind_key("Control-l", "clear_screen")
bind_key("Control-a", "beginning_of_line")
bind_key("Control-e", "end_of_line")
#bind_key("Control-l", "redraw_current_line")
#Commands for Manipulating the History
bind_key("Return", "accept_line")
bind_key("Control-p", "previous_history")
bind_key("Control-n", "next_history")
bind_key("Up", "history_search_backward")
bind_key("Down", "history_search_forward")
bind_key("Alt-<", "beginning_of_history")
bind_key("Alt->", "end_of_history")
bind_key("Control-r", "reverse_search_history")
bind_key("Control-s", "forward_search_history")
bind_key("Alt-p", "non_incremental_reverse_search_history")
bind_key("Alt-n", "non_incremental_forward_search_history")
bind_key("Control-z", "undo")
bind_key("Control-_", "undo")
#Commands for Changing Text
bind_key("Delete", "delete_char")
bind_key("Control-d", "delete_char")
bind_key("BackSpace", "backward_delete_char")
#bind_key("Control-Shift-v", "quoted_insert")
bind_key("Control-space", "self_insert")
bind_key("Control-BackSpace", "backward_delete_word")
#Killing and Yanking
bind_key("Control-k", "kill_line")
bind_key("Control-shift-k", "kill_whole_line")
bind_key("Escape", "kill_whole_line")
bind_key("Meta-d", "kill_word")
bind_key("Control-w", "unix_word_rubout")
#bind_key("Control-Delete", "forward_kill_word")
#Copy paste
bind_key("Shift-Right", "forward_char_extend_selection")
bind_key("Shift-Left", "backward_char_extend_selection")
bind_key("Shift-Control-Right", "forward_word_extend_selection")
bind_key("Shift-Control-Left", "backward_word_extend_selection")
bind_key("Control-m", "set_mark")
bind_key("Control-Shift-x", "copy_selection_to_clipboard")
#bind_key("Control-c", "copy_selection_to_clipboard") #Needs allow_ctrl_c(True) below to be uncommented
bind_key("Control-q", "copy_region_to_clipboard")
bind_key('Control-Shift-v', "paste_mulitline_code")
bind_key("Control-x", "cut_selection_to_clipboard")
bind_key("Control-v", "paste")
bind_key("Control-y", "yank")
bind_key("Alt-v", "ipython_paste")
#Unbinding keys:
#un_bind_key("Home")
#Other
bell_style("none") #modes: none, audible, visible(not implemented)
show_all_if_ambiguous("on")
mark_directories("on")
completer_delims(" \t\n\"\\'`@$><=;|&{(?")
complete_filesystem("on")
debug_output("off")
#allow_ctrl_c(True) #(Allows use of ctrl-c as copy key, still propagate keyboardinterrupt when not waiting for input)
history_filename(%r)
history_length(2000) #value of -1 means no limit
#set_mode("vi") #will cause following bind_keys to bind to vi mode as well as activate vi mode
#ctrl_c_tap_time_interval(0.3)
'''
    try:
        import pyreadline.rlmain
        if not os.path.exists(ipydir):
            os.makedirs(ipydir)
        conf = os.path.join(ipydir, 'pyreadline.txt')
        hist = os.path.join(ipydir, 'history.txt')
        # Substitute the history-file path into the config template above.
        config = config % hist
        with open(conf, 'wb') as f:
            f.write(config.encode('utf-8'))
        pyreadline.rlmain.config_path = conf
        import atexit
        import readline
        import pyreadline.unicode_helper  # noqa
        # Normally the codepage for pyreadline is set to be sys.stdout.encoding
        # if you need to change this uncomment the following line
        # pyreadline.unicode_helper.pyreadline_codepage="utf8"
    except ImportError:
        print("Module readline not available.")
    else:
        # import tab completion functionality
        import rlcompleter
        # Override completer from rlcompleter to disable automatic ( on callable
        completer_obj = rlcompleter.Completer()
        def nop(val, word):
            return word
        completer_obj._callable_postfix = nop
        readline.set_completer(completer_obj.complete)
        # activate tab completion
        readline.parse_and_bind("tab: complete")
        readline.read_history_file()
        atexit.register(readline.write_history_file)
        del readline, rlcompleter, atexit
class Exit:
    """Sentinel bound to ``exit`` in the REPL namespace: merely displaying it
    (repr/str) or calling it terminates the interpreter with status 0."""

    def _quit(self):
        raise SystemExit(0)

    __repr__ = __str__ = __call__ = _quit
class Helper:
    """Stand-in bound to ``help`` in the REPL namespace; calling it defers to
    :func:`pydoc.help`, and its repr explains how to use it."""

    def __repr__(self):
        return ('Type help() for interactive help, '
                'or help(object) for help about object.')

    def __call__(self, *args, **kwds):
        import pydoc
        return pydoc.help(*args, **kwds)
def simple_repl(user_ns=None):
    """Run a plain stdlib interactive console (fallback when IPython is absent).

    *user_ns* is the namespace dict to expose in the console; it is augmented
    with ``os``/``sys``/``re``, an ``exit`` sentinel and a ``help`` helper.
    The previous signature used a mutable default (``user_ns={}``) -- replaced
    with None per standard Python practice; behavior is unchanged because the
    body already rebinds falsy values to a fresh dict.
    """
    if iswindows:
        setup_pyreadline()
    else:
        try:
            import readline
            import rlcompleter  # noqa: F401 -- importing enables completion
            readline.parse_and_bind("tab: complete")
        except ImportError:
            pass
    user_ns = user_ns or {}
    import re
    import sys
    for x in ('os', 'sys', 're'):
        user_ns[x] = user_ns.get(x, globals().get(x, locals().get(x)))
    user_ns['exit'] = Exit()
    user_ns['help'] = Helper()
    from code import InteractiveConsole
    console = InteractiveConsole(user_ns)
    console.interact(BANNER + 'Use exit to quit')
def ipython(user_ns=None):
    """Start an embedded IPython shell seeded with *user_ns*.

    Falls back to :func:`simple_repl` when IPython/traitlets are not
    installed.  The IPython profile directory is forced into calibre's cache
    dir, though an existing user ``ipython_config.py`` is still honoured.
    """
    os.environ['IPYTHONDIR'] = ipydir
    try:
        from IPython.terminal.embed import InteractiveShellEmbed
        from IPython.terminal.prompts import Prompts, Token
        from traitlets.config.loader import Config
    except ImportError:
        # IPython is optional; degrade gracefully to the stdlib console.
        return simple_repl(user_ns=user_ns)

    class CustomPrompt(Prompts):
        # Renders the input prompt as: calibre[<version>]>

        def in_prompt_tokens(self, cli=None):
            return [
                (Token.Prompt, 'calibre['),
                (Token.PromptNum, get_version()),
                (Token.Prompt, ']> '),
            ]

        def out_prompt_tokens(self):
            return []

    c = Config()
    user_conf = os.path.expanduser('~/.ipython/profile_default/ipython_config.py')
    if os.path.exists(user_conf):
        exec_path(user_conf, {'get_config': lambda: c})
    c.TerminalInteractiveShell.prompts_class = CustomPrompt
    c.InteractiveShellApp.exec_lines = [
        'from __future__ import division, absolute_import, unicode_literals, print_function',
    ]
    c.TerminalInteractiveShell.confirm_exit = False
    c.TerminalInteractiveShell.banner1 = BANNER
    c.BaseIPythonApplication.ipython_dir = ipydir
    c.InteractiveShell.separate_in = ''
    c.InteractiveShell.separate_out = ''
    c.InteractiveShell.separate_out2 = ''
    # (Removed the dead `defns` dict that was built but never passed anywhere.)
    ipshell = InteractiveShellEmbed.instance(config=c, user_ns=user_ns)
    ipshell()
| 8,109 | Python | .pyt | 192 | 37.15625 | 118 | 0.631652 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,870 | planet_python.recipe | kovidgoyal_calibre/recipes/planet_python.recipe | from calibre.web.feeds.news import AutomaticNewsRecipe
class BasicUserRecipe(AutomaticNewsRecipe):
    """Calibre recipe for the Planet Python aggregator; article extraction is
    left entirely to AutomaticNewsRecipe's heuristics."""
    title = u'Planet Python'
    language = 'en'
    __author__ = 'Jelle van der Waa'
    oldest_article = 10
    max_articles_per_feed = 100
    feeds = [(u'Planet Python', u'http://planetpython.org/rss20.xml')]
| 314 | Python | .pyt | 8 | 35 | 70 | 0.713816 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,871 | substack.recipe | kovidgoyal_calibre/recipes/substack.recipe | #!/usr/bin/env python
# vim:fileencoding=utf-8
#
# Title: Substack
# License: GNU General Public License v3 – https://www.gnu.org/licenses/gpl-3.0.html
# Copyright: Nathan Cook (nathan.cook@gmail.com)
##
# Written: 2020-12-18
##
__license__ = 'GNU General Public License v3 – https://www.gnu.org/licenses/gpl-3.0.html'
__copyright__ = 'Nathan Cook – 2020-12-19'
__version__ = 'v0.1.1'
__date__ = '2020-12-19'
__author__ = 'topynate'
import json
from calibre.web.feeds.news import BasicNewsRecipe
from mechanize import Request
class Substack(BasicNewsRecipe):
    """Generic recipe for Substack publications.

    Add (title, 'https://<name>.substack.com/feed') pairs to ``feeds``.
    With credentials configured, the recipe logs in first so the feed
    serves full posts rather than previews of paid posts.
    """
    title = 'Substack'
    __author__ = 'topynate'
    oldest_article = 7
    language = 'en'
    max_articles_per_feed = 100
    auto_cleanup = True
    needs_subscription = 'optional'
    use_embedded_content = False
    # User-tunable options exposed in the calibre recipe dialog.
    recipe_specific_options = {
        'days': {
            'short': 'Oldest article to download from this news source. In days ',
            'long': 'For example, 0.5, gives you articles from the past 12 hours',
            'default': str(oldest_article)
        }
    }
    def __init__(self, *args, **kwargs):
        # Let the user override oldest_article through the 'days' option.
        BasicNewsRecipe.__init__(self, *args, **kwargs)
        d = self.recipe_specific_options.get('days')
        if d and isinstance(d, str):
            self.oldest_article = float(d)
    # Every Substack publication has an RSS feed at https://{name}.substack.com/feed.
    # The same URL provides either all posts, or all free posts + previews of paid posts,
    # depending on whether you're logged in.
    feeds = [
        ('Novum Lumen', 'https://novumlumen.substack.com/feed'), # gratuitously self-promotional example
    ]
    def get_browser(self):
        # Log in via Substack's JSON email-login endpoint when credentials
        # were supplied; the resulting session cookies unlock paid posts.
        br = BasicNewsRecipe.get_browser(self)
        if self.username is not None and self.password is not None:
            # Visit the login page first so any required cookies are set.
            br.open('https://substack.com/account/login?redirect=%2F&email=&with_password=')
            data = json.dumps({'email': self.username, 'password': self.password, 'captcha_response':None})
            req = Request(
                url='https://substack.com/api/v1/email-login',
                headers={
                    'Accept': '*/*',
                    'Content-Type': 'application/json',
                    'Origin': 'https://substack.com',
                    'Referer': 'https://substack.com/account/login?redirect=%2F&email=&with_password=',
                },
                data=data,
                method='POST')
            res = br.open(req)
            if res.getcode() != 200:
                raise ValueError('Login failed, check username and password')
        return br
| 2,630 | Python | .tac | 63 | 33.968254 | 107 | 0.605016 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,872 | attac_es.recipe | kovidgoyal_calibre/recipes/attac_es.recipe | # vim:fileencoding=utf-8
from __future__ import unicode_literals
from calibre.web.feeds.news import BasicNewsRecipe
class AttacEspanaRecipe (BasicNewsRecipe):
    """Recipe for attac.es, styled with the remotely fetched gutenweb CSS."""
    __author__ = 'Marc Busqué <marc@lamarciana.com>'
    __url__ = 'http://www.lamarciana.com'
    __version__ = '1.0.2'
    __license__ = 'GPL v3'
    __copyright__ = '2012, Marc Busqué <marc@lamarciana.com>'
    title = u'attac.es'
    description = u'La Asociación por la Tasación de las Transacciones Financieras y por la Ayuda a los Ciudadanos (ATTAC) es un movimiento internacional altermundialista que promueve el control democrático de los mercados financieros y las instituciones encargadas de su control mediante la reflexión política y la movilización social.' # noqa
    url = 'http://www.attac.es'
    language = 'es'
    tags = 'contrainformación, información alternativa'
    oldest_article = 7
    remove_empty_feeds = True
    no_stylesheets = True
    def get_extra_css(self):
        # Lazily download the gutenweb stylesheet once and cache it in
        # self.extra_css; the leading @charset declaration is stripped
        # because it is only legal at the very start of a standalone file.
        if not self.extra_css:
            br = self.get_browser()
            self.extra_css = br.open_novisit(
                'https://raw.githubusercontent.com/laMarciana/gutenweb/master/dist/gutenweb.css').read().replace('@charset "UTF-8";', '')
        return self.extra_css
    cover_url = u'http://www.attac.es/wp-content/themes/attacweb/images/attaces.jpg'
    feeds = [
        (u'Attac', u'http://www.attac.es/feed'),
    ]
| 1,406 | Python | .tac | 27 | 45.592593 | 345 | 0.691123 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,873 | stackoverflow.recipe | kovidgoyal_calibre/recipes/stackoverflow.recipe | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
'''
blog.stackoverflow.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
class StackOverflowBlog(BasicNewsRecipe):
    """Recipe for the Stack Overflow blog, read from its RSS feed."""
    title = 'Stack Overflow - Blog'
    __author__ = 'Darko Miletic'
    description = 'a programming community exploit'
    category = 'blog, programming'
    publisher = 'StackOverflow team'
    oldest_article = 30
    language = 'en'
    max_articles_per_feed = 100
    no_stylesheets = True
    use_embedded_content = True
    encoding = 'utf-8'
    # Options forwarded to the legacy html2lrf/html2epub conversion backends.
    html2lrf_options = [
        '--comment', description, '--category', category, '--publisher', publisher
    ]
    html2epub_options = 'publisher="%s"\ncomments="%s"\ntags="%s"' % (
        publisher, description, category)
    feeds = [(u'Articles', u'http://blog.stackoverflow.com/feed/')]
| 899 | Python | .tac | 25 | 31.6 | 82 | 0.666282 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,874 | tldata.py | andikleen_pmu-tools/tldata.py | import os
import csv
import re
from collections import defaultdict
import gen_level
class TLData:
    """Read a toplev output CSV file.
    Exported:
    times[n] All time stamps
    vals[n] All values, as dicts mapping (name, cpu)->float
    levels{name} All levels (includes metrics), name->list of fields
    units{name} All units, name->unit
    headers(set) All headers (including metrics)
    metrics(set) All metrics
    helptxt[col] All help texts.
    cpus(set) All CPUs
    """
    def __init__(self, fn, verbose=False):
        # fn: path to the toplev CSV file.
        # verbose: when True, keep rows toplev flagged in the unit column
        # instead of skipping them (see the endswith("<") check below).
        self.times = []
        self.vals = []
        self.fn = fn
        self.levels = defaultdict(set)
        self.metrics = set()
        self.headers = set()
        self.mtime = None  # st_mtime of fn at the last successful parse
        self.helptxt = {}
        self.cpus = set()
        self.verbose = verbose
        self.units = {}
    def update(self):
        # (Re)parse the CSV file; no-op when the file has not changed.
        mtime = os.path.getmtime(self.fn)
        if self.mtime == mtime:
            return
        self.mtime = mtime
        csvf = csv.reader(open(self.fn, 'r'))
        prevts = None
        # val accumulates (name, cpu) -> value for the current timestamp.
        val = {}
        for r in csvf:
            # Skip comment and header rows.
            if r[0].strip().startswith("#"):
                continue
            if r[0] == "Timestamp" or r[0] == "CPUs":
                continue
            # 1.001088024,C1,Frontend_Bound,42.9,% Slots,,frontend_retired.latency_ge_4:pp,0.0,100.0,<==,Y
            # Per-CPU rows carry a core/socket/thread id (e.g. C1, S0, 3)
            # in the second column; aggregate rows start with the name.
            if re.match(r'[CS]?\d+.*', r[1]):
                ts, cpu, name, pct, unit, helptxt = r[0], r[1], r[2], r[3], r[4], r[5]
            else:
                ts, name, pct, unit, helptxt = r[0], r[1], r[2], r[3], r[4]
                cpu = None
            key = (name, cpu)
            ts, pct = float(ts), float(pct.replace("%", ""))
            # Remember the first non-empty help text seen for each column.
            if name not in self.helptxt or self.helptxt[name] == "":
                self.helptxt[name] = helptxt
            if unit.endswith("<"):
                # A unit ending in "<" appears to be a two-character marker
                # appended by toplev (stripped with [:-2]); such rows are
                # dropped unless verbose — TODO confirm marker semantics.
                unit = unit[:-2]
                if not self.verbose:
                    continue
            self.units[name] = unit
            # A new timestamp closes out the values collected so far.
            if prevts and ts != prevts:
                self.times.append(prevts)
                self.vals.append(val)
                val = {}
            val[key] = pct
            n = gen_level.level_name(name)
            if cpu:
                self.cpus.add(cpu)
            self.headers.add(name)
            if gen_level.is_metric(name):
                self.metrics.add(n)
            self.levels[n].add(name)
            prevts = ts
        # Flush the final timestamp group.
        if len(val.keys()) > 0:
            self.times.append(prevts)
            self.vals.append(val)
# Levels that are always plotted first, in this fixed order.
early_plots = ["TopLevel", "CPU utilization", "Power", "Frequency", "CPU-METRIC"]

def sort_key(i, data):
    """Rank level *i* for plotting: early plots first (by their fixed
    position), then ordinary levels in file order, metrics grouped at 30."""
    try:
        return early_plots.index(i)
    except ValueError:
        pass
    if i in data.metrics:
        return 30
    return list(data.levels.keys()).index(i)

def level_order(data):
    """Return plot order of all levels."""
    return sorted(data.levels.keys(), key=lambda lvl: sort_key(lvl, data))
| 2,915 | Python | .py | 83 | 25.373494 | 106 | 0.529204 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,875 | msr.py | andikleen_pmu-tools/msr.py | #!/usr/bin/env python3
# library and tool to access Intel MSRs (model specific registers)
# Author: Andi Kleen
from __future__ import print_function
import glob
import struct
import os
def writemsr(msr, val):
    """Write *val* (unsigned 64-bit) to MSR number *msr* on every online CPU.

    Raises OSError when no /dev/cpu/*/msr nodes exist (msr module missing).
    """
    devices = glob.glob('/dev/cpu/[0-9]*/msr')
    data = struct.pack('Q', val)
    for dev in devices:
        fd = os.open(dev, os.O_WRONLY)
        os.lseek(fd, msr, os.SEEK_SET)
        os.write(fd, data)
        os.close(fd)
    if not devices:
        raise OSError("msr module not loaded (run modprobe msr)")
def readmsr(msr, cpu = 0):
    """Read and return the 64-bit value of MSR number *msr* on *cpu*."""
    fd = os.open('/dev/cpu/%d/msr' % (cpu,), os.O_RDONLY)
    os.lseek(fd, msr, os.SEEK_SET)
    raw = os.read(fd, 8)
    os.close(fd)
    (value,) = struct.unpack('Q', raw)
    return value
def changebit(msr, bit, val):
    """Set (*val* truthy) or clear (*val* falsy) bit *bit* of MSR *msr*
    on every online CPU via read-modify-write.

    Raises OSError when no /dev/cpu/*/msr nodes exist (msr module missing).
    """
    devices = glob.glob('/dev/cpu/[0-9]*/msr')
    mask = 1 << bit
    for dev in devices:
        fd = os.open(dev, os.O_RDWR)
        os.lseek(fd, msr, os.SEEK_SET)
        (cur,) = struct.unpack('Q', os.read(fd, 8))
        cur = (cur | mask) if val else (cur & ~mask)
        os.lseek(fd, msr, os.SEEK_SET)
        os.write(fd, struct.pack('Q', cur))
        os.close(fd)
    if not devices:
        raise OSError("msr module not loaded (run modprobe msr)")
if __name__ == '__main__':
    import argparse

    def parse_hex(s):
        """argparse type callable: parse a hex string into an int."""
        try:
            return int(s, 16)
        except ValueError:
            # Bug fix: argparse.ArgumentError cannot be built from a message
            # alone (it needs the argument object); type callables must raise
            # ArgumentTypeError, which argparse turns into a clean usage error.
            raise argparse.ArgumentTypeError("Bad hex number %s" % (s,))

    if not os.path.exists("/dev/cpu/0/msr"):
        os.system("/sbin/modprobe msr")
    p = argparse.ArgumentParser(description='Access x86 model specific registers.')
    p.add_argument('msr', type=parse_hex, help='number of the MSR to access')
    p.add_argument('value', nargs='?', type=parse_hex, help='value to write (if not specified read)')
    p.add_argument('--setbit', type=int, help='Bit number to set')
    p.add_argument('--clearbit', type=int, help='Bit number to clear')
    p.add_argument('--cpu', type=int, default=0, help='CPU to read on (writes always change all)')
    args = p.parse_args()
    # Bug fix: compare against None instead of truthiness so that bit
    # number 0 works with --setbit/--clearbit (0 is falsy but valid).
    if args.value is None and args.setbit is None and args.clearbit is None:
        print("%x" % (readmsr(args.msr, args.cpu)))
    elif args.setbit is not None:
        changebit(args.msr, args.setbit, 1)
    elif args.clearbit is not None:
        changebit(args.msr, args.clearbit, 0)
    else:
        writemsr(args.msr, args.value)
| 2,291 | Python | .py | 61 | 31.262295 | 101 | 0.603689 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,876 | counterdiff.py | andikleen_pmu-tools/counterdiff.py | #!/usr/bin/env python3
# counterdiff.py < plog program .. (or general perf arguments)
# verify plog.* output from toplev by running event one by one
# this can be used to estimate multiplexing measurement errors
from __future__ import print_function
import sys, os
def run(x):
    """Echo the command line, then execute it through the shell."""
    print(x)
    os.system(x)
# For each event line of the toplev plog on stdin, re-run perf stat with
# just that event (no multiplexing) and report the relative difference
# between the multiplexed and the standalone count.
for l in sys.stdin:
    if l.find(",") < 0:
        continue
    n = l.strip().split(",")  # n[0] = count, n[1] = event name
    run("perf stat --output l -x, -e %s %s" %
        (n[1], " ".join(sys.argv[1:])))
    # Pick the first data line from the perf output file "l".
    # Idiom fix: use a context manager so the file is always closed.
    with open("l", "r") as f:
        for i in f:
            if i.find(",") < 0:
                continue
            j = i.strip().split(",")
            break
    # NOTE(review): if perf produced no comma line, j is unbound here and
    # a NameError follows — unchanged from the original behavior.
    if float(n[0]) > 0:
        delta = (float(j[0]) - float(n[0])) / float(n[0])
    else:
        delta = 0
    print(n[1], j[0], n[0], "%.2f" % (delta * 100.0))
| 803 | Python | .py | 27 | 24.518519 | 67 | 0.546512 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,877 | utilized.py | andikleen_pmu-tools/utilized.py | #!/usr/bin/python
# extract utilized CPUs out of toplev CSV output
# toplev ... -I 1000 --node +CPU_Utilization -x, -o x.csv ...
# utilized.py < x.csv
# note it duplicates the core output
from __future__ import print_function
import argparse
import csv
import sys
import re
import collections
# Command line: input CSV (default stdin), output CSV (default stdout), and
# the utilization percentage below which a thread's rows are dropped.
ap = argparse.ArgumentParser()
ap.add_argument('--min-util', default=10., type=float)
ap.add_argument('file', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
ap.add_argument('--output', '-o', type=argparse.FileType('w'), default=sys.stdout)
args = ap.parse_args()
key = None
c = csv.reader(args.file)
wr = csv.writer(args.output)
# fields: CPU/thread id (column 2) -> all of its CSV rows, in input order.
fields = collections.OrderedDict()
# util: CPU/thread id -> its CPU_Utilization samples (fractions; see *100 below).
util = collections.defaultdict(list)
# First pass: bucket every row by its id and collect utilization samples.
for t in c:
    if len(t) < 3 or t[0].startswith("#"):
        continue
    if t[0] == "Timestamp":
        # Header row: emit immediately (it is also bucketed below).
        wr.writerow(t)
    key = t[1] # XXX handle no -I
    if key in fields:
        fields[key].append(t)
    else:
        fields[key] = [t]
    if t[2] == "CPU_Utilization":
        util[key].append(float(t[3]))
# Second pass: keep only threads whose average utilization reaches
# --min-util; each kept thread's core-level rows are emitted alongside it
# (hence the duplicated core output noted in the file header).
final = []
skipped = []
for j in fields.keys():
    if "-T" not in j and not j.startswith("CPU"):
        # Not a thread id: ids containing "S" (sockets, presumably — verify)
        # are deferred to the end; other aggregate ids are handled via cores.
        if "S" in j:
            final.append(j)
        continue
    core = re.sub(r'-T\d+', '', j)
    # Default to fully utilized when no CPU_Utilization samples were seen.
    utilization = 100
    if len(util[j]) > 0:
        utilization = (sum(util[j]) / len(util[j])) * 100.
    if utilization >= float(args.min_util):
        for k in fields[core] + fields[j]:
            wr.writerow(k)
    else:
        skipped.append(j)
# Emit the deferred socket-level rows last.
for j in final:
    for k in fields[j]:
        wr.writerow(k)
print("skipped", " ".join(skipped), file=sys.stderr)
| 1,600 | Python | .py | 53 | 25.981132 | 82 | 0.630759 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,878 | ivb_server_ratios.py | andikleen_pmu-tools/ivb_server_ratios.py | # -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel Xeon E5 v2 (code named IvyBridge EP)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#
# Helpers
# Error sink: the toplev driver replaces this; the default swallows messages.
print_error = lambda msg: False
smt_enabled = False  # overwritten by the driver for the measured system
ebs_mode = False
version = "4.8-full-perf"
base_frequency = -1.0
Memory = 0
Average_Frequency = 0.0
num_cores = 1
num_threads = 1
num_sockets = 1

def handle_error(obj, msg):
    # Zero out a node whose formula failed (division by zero), report the
    # message through print_error, and count the failure on the node.
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
    obj.thresh = False

def handle_error_metric(obj, msg):
    # Same as handle_error, but metric objects carry no thresh flag to reset.
    print_error(msg)
    obj.errcount += 1
    obj.val = 0

# Constants

# Model-specific constants used by the generated formulas below
# (port counts, cost weights, pipeline width). Values come from the
# generated TMA model for IvyBridge EP — do not tune by hand.
Exe_Ports = 6
Mem_L2_Store_Cost = 9
Mem_L3_Weight = 7
Mem_STLB_Hit_Cost = 7
BAClear_Cost = 12
MS_Switches_Cost = 3
Avg_Assist_Cost = 66
Pipeline_Width = 4
OneMillion = 1000000
OneBillion = 1000000000
Energy_Unit = 15.6
EBS_Mode = 0
DS = 1
# Aux. formulas
def Backend_Bound_Cycles(self, EV, level):
return (STALLS_TOTAL(self, EV, level) + EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level) - Few_Uops_Executed_Threshold(self, EV, level) - Frontend_RS_Empty_Cycles(self, EV, level) + EV("RESOURCE_STALLS.SB", level))
def Cycles_0_Ports_Utilized(self, EV, level):
return (EV("UOPS_EXECUTED.CORE:i1:c1", level)) / 2 if smt_enabled else(STALLS_TOTAL(self, EV, level) - Frontend_RS_Empty_Cycles(self, EV, level))
def Cycles_1_Port_Utilized(self, EV, level):
return (EV("UOPS_EXECUTED.CORE:c1", level) - EV("UOPS_EXECUTED.CORE:c2", level)) / 2 if smt_enabled else(EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level) - EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level))
def Cycles_2_Ports_Utilized(self, EV, level):
return (EV("UOPS_EXECUTED.CORE:c2", level) - EV("UOPS_EXECUTED.CORE:c3", level)) / 2 if smt_enabled else(EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level) - EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level))
def Cycles_3m_Ports_Utilized(self, EV, level):
return (EV("UOPS_EXECUTED.CORE:c3", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level)
def DurationTimeInSeconds(self, EV, level):
return EV("interval-ms", 0) / 1000
def Execute_Cycles(self, EV, level):
return (EV("UOPS_EXECUTED.CORE:c1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level)
def Fetched_Uops(self, EV, level):
return (EV("IDQ.DSB_UOPS", level) + EV("LSD.UOPS", level) + EV("IDQ.MITE_UOPS", level) + EV("IDQ.MS_UOPS", level))
def Few_Uops_Executed_Threshold(self, EV, level):
EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level)
EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level)
return EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level) if (IPC(self, EV, level)> 1.8) else EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level)
# Floating Point computational (arithmetic) Operations Count
def FLOP_Count(self, EV, level):
return (1 *(EV("FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE", level) + EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", level)) + 2 * EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", level) + 4 *(EV("FP_COMP_OPS_EXE.SSE_PACKED_SINGLE", level) + EV("SIMD_FP_256.PACKED_DOUBLE", level)) + 8 * EV("SIMD_FP_256.PACKED_SINGLE", level))
# Floating Point computational (arithmetic) Operations Count
def FP_Arith_Scalar(self, EV, level):
return EV("FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE", level) + EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", level)
# Floating Point computational (arithmetic) Operations Count
def FP_Arith_Vector(self, EV, level):
return EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", level) + EV("FP_COMP_OPS_EXE.SSE_PACKED_SINGLE", level) + EV("SIMD_FP_256.PACKED_SINGLE", level) + EV("SIMD_FP_256.PACKED_DOUBLE", level)
def Frontend_RS_Empty_Cycles(self, EV, level):
EV("RS_EVENTS.EMPTY_CYCLES", level)
return EV("RS_EVENTS.EMPTY_CYCLES", level) if (self.Fetch_Latency.compute(EV)> 0.1) else 0
def Frontend_Latency_Cycles(self, EV, level):
return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", level)) , level )
def HighIPC(self, EV, level):
val = IPC(self, EV, level) / Pipeline_Width
return val
def ITLB_Miss_Cycles(self, EV, level):
return (12 * EV("ITLB_MISSES.STLB_HIT", level) + EV("ITLB_MISSES.WALK_DURATION", level))
def LOAD_L1_MISS(self, EV, level):
return EV("MEM_LOAD_UOPS_RETIRED.L2_HIT", level) + EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level) + EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT", level) + EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM", level) + EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS", level)
def LOAD_L1_MISS_NET(self, EV, level):
return LOAD_L1_MISS(self, EV, level) + EV("MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM", level) + EV("MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM", level) + EV("MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM", level) + EV("MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD", level)
def LOAD_L3_HIT(self, EV, level):
return EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_LCL_MEM(self, EV, level):
return EV("MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_RMT_FWD(self, EV, level):
return EV("MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_RMT_HITM(self, EV, level):
return EV("MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_RMT_MEM(self, EV, level):
return EV("MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_XSNP_HIT(self, EV, level):
return EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_XSNP_HITM(self, EV, level):
return EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_XSNP_MISS(self, EV, level):
return EV("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def Mem_L3_Hit_Fraction(self, EV, level):
return EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level) / (EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level) + Mem_L3_Weight * EV("MEM_LOAD_UOPS_RETIRED.LLC_MISS", level))
def Mem_Lock_St_Fraction(self, EV, level):
return EV("MEM_UOPS_RETIRED.LOCK_LOADS", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level)
def Memory_Bound_Fraction(self, EV, level):
return (STALLS_MEM_ANY(self, EV, level) + EV("RESOURCE_STALLS.SB", level)) / Backend_Bound_Cycles(self, EV, level)
def Mispred_Clears_Fraction(self, EV, level):
return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))
def ORO_Demand_RFO_C1(self, EV, level):
return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level )
def ORO_DRD_Any_Cycles(self, EV, level):
return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level )
def ORO_DRD_BW_Cycles(self, EV, level):
return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c6", level)) , level )
def SQ_Full_Cycles(self, EV, level):
return (EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) / 2) if smt_enabled else EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level)
def STALLS_MEM_ANY(self, EV, level):
return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("CYCLE_ACTIVITY.STALLS_LDM_PENDING", level)) , level )
def STALLS_TOTAL(self, EV, level):
return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("CYCLE_ACTIVITY.CYCLES_NO_EXECUTE", level)) , level )
def Store_L2_Hit_Cycles(self, EV, level):
return EV("L2_RQSTS.RFO_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level))
def Mem_XSNP_HitM_Cost(self, EV, level):
return 60
def Mem_XSNP_Hit_Cost(self, EV, level):
return 43
def Mem_XSNP_None_Cost(self, EV, level):
return 41
def Mem_Local_DRAM_Cost(self, EV, level):
return 200
def Mem_Remote_DRAM_Cost(self, EV, level):
return 310
def Mem_Remote_HitM_Cost(self, EV, level):
return 200
def Mem_Remote_Fwd_Cost(self, EV, level):
return 180
def Recovery_Cycles(self, EV, level):
return (EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2) if smt_enabled else EV("INT_MISC.RECOVERY_CYCLES", level)
def Retire_Fraction(self, EV, level):
return Retired_Slots(self, EV, level) / EV("UOPS_ISSUED.ANY", level)
# Retired slots per Logical Processor
def Retired_Slots(self, EV, level):
return EV("UOPS_RETIRED.RETIRE_SLOTS", level)
# Number of logical processors (enabled or online) on the target system
def Num_CPUs(self, EV, level):
return 8 if smt_enabled else 4
# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)
# Uops Per Instruction
def UopPI(self, EV, level):
val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
self.thresh = (val > 1.05)
return val
# Uops per taken branch
def UpTB(self, EV, level):
val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
self.thresh = val < Pipeline_Width * 1.5
return val
# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
return 1 / IPC(self, EV, level)
def CLKS(self, EV, level):
    """Per-Logical Processor actual clocks when the Logical Processor is active."""
    return EV("CPU_CLK_UNHALTED.THREAD", level)
# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
return Pipeline_Width * CORE_CLKS(self, EV, level)
# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level):
return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_ISSUED.ANY", level)
# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)
# Floating Point Operations Per Cycle
def FLOPc(self, EV, level):
return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level)
# Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)
def ILP(self, EV, level):
return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_EXECUTED.THREAD:c1", level)
# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
return ((EV("CPU_CLK_UNHALTED.THREAD", level) / 2) * (1 + EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_XCLK", level))) if ebs_mode else(EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2) if smt_enabled else CLKS(self, EV, level)
# Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level)
self.thresh = (val < 3)
return val
# Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level)
self.thresh = (val < 8)
return val
# Instructions per Branch (lower number means higher occurrence rate)
def IpBranch(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
self.thresh = (val < 8)
return val
# Instructions per (near) call (lower number means higher occurrence rate)
def IpCall(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
self.thresh = (val < 200)
return val
# Instructions per taken branch
def IpTB(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
self.thresh = val < Pipeline_Width * 2 + 1
return val
# Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes.
def BpTkBranch(self, EV, level):
return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
# Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.
def IpArith(self, EV, level):
val = 1 /(self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV))
self.thresh = (val < 10)
return val
# Total number of retired Instructions
def Instructions(self, EV, level):
return EV("INST_RETIRED.ANY", level)
# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level)
def Execute(self, EV, level):
return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level)
# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
return val
# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)
# Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
self.thresh = (val < 200)
return val
# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
val = Instructions(self, EV, level) / (Retire_Fraction(self, EV, level) * EV("BR_MISP_EXEC.INDIRECT", level))
self.thresh = (val < 1000)
return val
# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
return EV("L1D_PEND_MISS.PENDING", level) / (EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level) + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level))
# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)
# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)
# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
return 1000 * EV("OFFCORE_REQUESTS.DEMAND_RFO", level) / EV("INST_RETIRED.ANY", level)
# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
return 1000 * EV("MEM_LOAD_UOPS_RETIRED.LLC_MISS", level) / EV("INST_RETIRED.ANY", level)
def L1D_Cache_Fill_BW(self, EV, level):
return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)
def L2_Cache_Fill_BW(self, EV, level):
return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)
def L3_Cache_Fill_BW(self, EV, level):
return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)
# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
val = (EV("ITLB_MISSES.WALK_DURATION", level) + EV("DTLB_LOAD_MISSES.WALK_DURATION", level) + EV("DTLB_STORE_MISSES.WALK_DURATION", level)) / CORE_CLKS(self, EV, level)
self.thresh = (val > 0.5)
return val
# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
return L1D_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
return L2_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
return L3_Cache_Fill_BW(self, EV, level)
# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)
# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", level)
# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
return EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)
# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)
# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)
# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)
# Measured Average Uncore Frequency for the SoC [GHz]
def Uncore_Frequency(self, EV, level):
return Socket_CLKS(self, EV, level) / 1e9 / Time(self, EV, level)
# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)
# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)
# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / (EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2) if smt_enabled else 0
# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
self.thresh = (val > 0.05)
return val
# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)
# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
return (64 *(EV("UNC_M_CAS_COUNT.RD", level) + EV("UNC_M_CAS_COUNT.WR", level)) / OneBillion) / Time(self, EV, level)
# Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches.
def MEM_Read_Latency(self, EV, level):
return OneBillion *(EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:Match=0x182", level) / EV("UNC_C_TOR_INSERTS.MISS_OPCODE:Match=0x182", level)) / (Socket_CLKS(self, EV, level) / Time(self, EV, level))
# Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches
def MEM_Parallel_Reads(self, EV, level):
return EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:Match=0x182", level) / EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:Match=0x182:c1", level)
# Run duration time in seconds
def Time(self, EV, level):
    """Measured interval length in seconds; thresh flags sub-second runs."""
    seconds = EV("interval-s", 0)
    self.thresh = seconds < 1
    return seconds
# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
return EV("UNC_C_CLOCKTICKS:one_unit", level)
# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    """Instructions per Far Branch (lower number means higher occurrence rate).

    Far Branches apply upon transition from application to operating
    system, handling interrupts, exceptions. Sets self.thresh True when
    far branches are unusually frequent (< 1M instructions each).
    """
    instructions = EV("INST_RETIRED.ANY", level)
    far_branches = EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    per_branch = instructions / far_branches
    self.thresh = (per_branch < 1000000)
    return per_branch
# Event groups
class Frontend_Bound:
    """TMA level-1 node: fraction of Slots where the Frontend undersupplies the Backend.

    compute() sets self.val (the metric) and self.thresh (True when significant).
    """
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the
processor's Frontend undersupplies its Backend. Frontend
denotes the first part of the processor core responsible to
fetch operations that are executed later on by the Backend
part. Within the Frontend; a branch predictor predicts the
next address to fetch; cache-lines are fetched from the
memory subsystem; parsed into instructions; and lastly
decoded into micro-operations (uops). Ideally the Frontend
can issue Pipeline_Width uops every cycle to the Backend.
Frontend Bound denotes unutilized issue-slots when there is
no Backend stall; i.e. bubbles where Frontend delivered no
uops while Backend could have accepted them. For example;
stalls due to instruction-cache misses would be categorized
under Frontend Bound."""
class Fetch_Latency:
    """TMA level-2 node (under Frontend_Bound): Slots stalled on Frontend latency."""
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['RS_EVENTS.EMPTY_END']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Pipeline_Width * Frontend_Latency_Cycles(self, EV, 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend latency issues. For example; instruction-
cache misses; iTLB misses or fetch stalls after a branch
misprediction are categorized under Frontend Latency. In
such cases; the Frontend eventually delivers no uops for
some period."""
class ICache_Misses:
    """TMA level-3 node: Clocks stalled on instruction-cache misses (ITLB part excluded)."""
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ICACHE.IFETCH_STALL", 3) / CLKS(self, EV, 3) - self.ITLB_Misses.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to instruction cache misses.. Using compiler's
Profile-Guided Optimization (PGO) can reduce i-cache misses
through improved hot code layout."""
class ITLB_Misses:
    """TMA level-3 node: Clocks stalled on Instruction TLB (ITLB) misses."""
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['ITLB_MISSES.WALK_COMPLETED']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ITLB_Miss_Cycles(self, EV, 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Instruction TLB (ITLB) misses.. Consider
large 2M pages for code (selectively prefer hot large-size
function, due to limited 2M entries). Linux options:
standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public
ations/optimizing-function-placement-for-large-scale-data-
center-applications-2/"""
class Branch_Resteers:
    """TMA level-3 node: Clocks stalled on Branch Resteers (fetch redirect after clears/mispredicts)."""
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = BAClear_Cost *(EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) + EV("MACHINE_CLEARS.COUNT", 3) + EV("BACLEARS.ANY", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers. Branch Resteers estimates
the Frontend delay in fetching operations from corrected
path; following all sorts of miss-predicted branches. For
example; branchy code with lots of miss-predictions might
get categorized under Branch Resteers. Note the value of
this node may overlap with its siblings."""
class MS_Switches:
    """TMA level-3 node: estimated cycles lost to Microcode Sequencer (MS) switches."""
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU
was stalled due to switches of uop delivery to the Microcode
Sequencer (MS). Commonly used instructions are optimized for
delivery by the DSB (decoded i-cache) or MITE (legacy
instruction decode) pipelines. Certain operations cannot be
handled natively by the execution pipeline; and must be
performed by microcode (small programs injected into the
execution stream). Switching to the MS too often can
negatively impact performance. The MS is designated to
deliver long uop flows required by CISC instructions like
CPUID; or uncommon conditions like Floating Point Assists
when dealing with Denormals."""
class LCP:
    """TMA level-3 node: Clocks stalled on Length Changing Prefixes (LCPs)."""
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ILD_STALL.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled
due to Length Changing Prefixes (LCPs). Using proper
compiler flags or Intel Compiler by default will certainly
avoid this."""
class DSB_Switches:
    """TMA level-3 node: Clocks stalled on DSB-to-MITE pipeline switch penalties."""
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to switches from DSB to MITE pipelines. The DSB
(decoded i-cache) is a Uop Cache where the front-end
directly delivers Uops (micro operations) avoiding heavy x86
decoding. The DSB pipeline has shorter latency and delivered
higher bandwidth than the MITE (legacy instruction decode
pipeline). Switching between the two pipelines can cause
penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class Fetch_Bandwidth:
    """TMA level-2 node (under Frontend_Bound): Slots stalled on Frontend bandwidth."""
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend bandwidth issues. For example;
inefficiencies at the instruction decoders; or restrictions
for caching in the DSB (decoded uops cache) are categorized
under Fetch Bandwidth. In such cases; the Frontend typically
delivers suboptimal amount of uops to the Backend."""
class MITE:
    """TMA level-3 node: Core cycles likely limited by the legacy MITE decode pipeline."""
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.ALL_MITE_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_MITE_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to the MITE pipeline (the legacy
decode pipeline). This pipeline is used for code that was
not pre-cached in the DSB or LSD. For example;
inefficiencies due to asymmetric decoders; use of long
immediate or LCP can manifest as MITE fetch bandwidth
bottleneck.. Consider tuning codegen of 'small hotspots'
that can fit in DSB. Read about 'Decoded ICache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class DSB:
    """TMA level-3 node: Core cycles likely limited by the DSB (decoded uop cache) fetch pipeline."""
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.ALL_DSB_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_DSB_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to DSB (decoded uop cache) fetch
pipeline. For example; inefficient utilization of the DSB
cache structure or bank conflict when reading from it; are
categorized here."""
class Bad_Speculation:
    """TMA level-1 node: fraction of Slots wasted due to incorrect speculation."""
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""
class Branch_Mispredicts:
    """TMA level-2 node (under Bad_Speculation): Slots wasted on branch misprediction."""
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction. These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Machine_Clears:
    """TMA level-2 node (under Bad_Speculation): Slots wasted on machine clears."""
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears. These slots are either wasted by
uops fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:. https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""
class Backend_Bound:
    """TMA level-1 node: Slots where the Backend lacks resources to accept new uops.

    Computed as the remainder after the other three level-1 categories.
    """
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 1 -(self.Frontend_Bound.compute(EV) + self.Bad_Speculation.compute(EV) + self.Retiring.compute(EV))
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. Backend is the portion of
the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""
class Memory_Bound:
    """TMA level-2 node (under Backend_Bound): Slots bottlenecked in the memory subsystem."""
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck. Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""
class L1_Bound:
    """TMA level-3 node: stalls without loads missing the L1 data cache (clamped at 0)."""
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L1_HIT:pp', 'MEM_LOAD_UOPS_RETIRED.HIT_LFB:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max((STALLS_MEM_ANY(self, EV, 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_PENDING", 3)) / CLKS(self, EV, 3) , 0 )
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled without
loads missing the L1 data cache. The L1 data cache
typically has the shortest latency. However; in certain
cases like loads blocked on older stores; a load might
suffer due to high latency even though it is being satisfied
by the L1. Another example is loads who miss in the TLB.
These cases are characterized by execution unit stalls;
while some non-completed demand load lives in the machine
without having that demand load missing the L1 cache."""
class DTLB_Load:
    """TMA level-4 node: estimated cycles where loads missed the first-level data TLB."""
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT", 4) + EV("DTLB_LOAD_MISSES.WALK_DURATION", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages). This includes hitting in the second-level TLB (STLB)
as well as performing a hardware page walk on an STLB miss.."""
class Store_Fwd_Blk:
    """TMA level-4 node: estimated cycles where loads were blocked on store forwarding."""
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles when the
memory subsystem had loads blocked since they could not
forward data from earlier (in program order) overlapping
stores. To streamline memory operations in the pipeline; a
load can avoid waiting for memory if a prior in-flight store
is writing the data that the load wants to read (store
forwarding process). However; in some cases the load may be
blocked for a significant time pending the store forward.
For example; when the prior store is writing a smaller
region than the load is reading."""
class Lock_Latency:
    """TMA level-4 node: cycles spent handling cache misses due to lock operations."""
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.LOCK_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_Lock_St_Fraction(self, EV, 4) * ORO_Demand_RFO_C1(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU spent
handling cache misses due to lock operations. Due to the
microarchitecture handling of locks; they are classified as
L1_Bound regardless of what memory source satisfied them."""
class Split_Loads:
    """TMA level-4 node: cycles handling loads that cross a 64-byte cache-line boundary."""
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles handling memory
load split accesses - load that cross 64-byte cache line
boundary. . Consider aligning data or hot structure fields.
See the Optimization Manual for more details"""
class G4K_Aliasing:
    """TMA level-4 node: cycles where loads were aliased by preceding stores at a 4K offset.

    Class name carries a 'G' prefix because Python identifiers cannot start with a digit;
    the reported metric name stays "4K_Aliasing".
    """
    name = "4K_Aliasing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "G4K_Aliasing zero division")
        return self.val
    desc = """
This metric estimates how often memory load accesses were
aliased by preceding stores (in program order) with a 4K
address offset. False match is possible; which incur a few
cycles load re-issue. However; the short re-issue duration
is often hidden by the out-of-order core and HW
optimizations; hence a user may safely ignore a high value
of this metric unless it manages to propagate up into parent
nodes of the hierarchy (e.g. to L1_Bound).. Consider
reducing independent loads/stores accesses with 4K offsets.
See the Optimization Manual for more details"""
class FB_Full:
    """TMA level-4 node: rough estimate of L1D Fill Buffer unavailability.

    NOTE: unlike most siblings, thresh here does not gate on self.parent.thresh.
    """
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("L1D_PEND_MISS.FB_FULL:c1", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val
    desc = """
This metric does a *rough estimation* of how often L1D Fill
Buffer unavailability limited additional L1D miss memory
access requests to proceed. The higher the metric value; the
deeper the memory hierarchy level the misses are satisfied
from (metric values >1 are valid). Often it hints on
approaching bandwidth limits (to L2 cache; L3 cache or
external memory).. See $issueBW and $issueSL hints. Avoid
software prefetches if indeed memory BW limited."""
class L2_Bound:
    """TMA level-3 node: stalls due to L2 cache accesses by loads."""
    name = "L2_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L2_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("CYCLE_ACTIVITY.STALLS_L1D_PENDING", 3) - EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L2_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
L2 cache accesses by loads. Avoiding cache misses (i.e. L1
misses/L2 hits) can improve the latency and increase
performance."""
class L3_Bound:
    """TMA level-3 node: stalls due to L3 cache accesses or sibling-core contention."""
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.LLC_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mem_L3_Hit_Fraction(self, EV, 3) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
loads accesses to L3 cache or contended with a sibling Core.
Avoiding cache misses (i.e. L2 misses/L3 hits) can improve
the latency and increase performance."""
class Contested_Accesses:
    """TMA level-4 node: cycles handling contested (cross-core snoop HitM/Miss) accesses."""
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_XSNP_HitM_Cost(self, EV, 4) * LOAD_XSNP_HITM(self, EV, 4) + Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_MISS(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to contested
accesses. Contested accesses occur when data written by one
Logical Processor are read by another Logical Processor on a
different Physical Core. Examples of contested accesses
include synchronizations such as locks; true data sharing
such as modified locked variables; and false sharing."""
class Data_Sharing:
    """TMA level-4 node: cycles handling data-sharing snoop-hit accesses."""
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to data-sharing
accesses. Data shared by multiple Logical Processors (even
just read shared) may cause increased access latency due to
cache coherency. Excessive data sharing can drastically harm
multithreaded performance."""
class L3_Hit_Latency:
    """TMA level-4 node: cycles of demand loads hitting L3 under unloaded-latency scenarios."""
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.LLC_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_None_Cost(self, EV, 4) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles with demand load
accesses that hit the L3 cache under unloaded scenarios
(possibly L3 latency limited). Avoiding private cache
misses (i.e. L2 misses/L3 hits) will improve the latency;
reduce contention with sibling physical cores and increase
performance. Note the value of this node may overlap with
its siblings."""
class SQ_Full:
    """TMA level-4 node: cycles where the Super Queue (SQ) was full."""
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = SQ_Full_Cycles(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """
This metric measures fraction of cycles where the Super
Queue (SQ) was full taking into account all request-types
and both hardware SMT threads (Logical Processors)."""
class DRAM_Bound:
    """TMA level-3 node: stalls on external memory (DRAM) accesses by loads."""
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.LLC_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (1 - Mem_L3_Hit_Fraction(self, EV, 3)) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled on
accesses to external memory (DRAM) by loads. Better caching
can improve the latency and increase performance."""
class MEM_Bandwidth:
    """TMA level-4 node: cycles where performance likely approached external memory bandwidth limits."""
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the core's
performance was likely hurt due to approaching bandwidth
limits of external memory - DRAM ([SPR-HBM] and/or HBM).
The underlying heuristic assumes that a similar off-core
traffic is generated by all IA cores. This metric does not
aggregate non-data-read requests by this logical processor;
requests from other IA Logical Processors/Physical
Cores/sockets; or other non-IA devices like GPU; hence the
maximum external memory bandwidth limits may or may not be
approached when this metric is flagged (see Uncore counters
for that).. Improve data accesses to reduce cacheline
transfers from/to memory. Examples: 1) Consume all bytes of
a each cacheline before it is evicted (e.g. reorder
structure elements and split non-hot ones), 2) merge
computed-limited with BW-limited loops, 3) NUMA
optimizations in multi-socket system. Note: software
prefetches will not help BW-limited application.."""
class MEM_Latency:
    """TMA level-4 node: cycles hurt by external memory latency (bandwidth part subtracted)."""
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the
performance was likely hurt due to latency from external
memory - DRAM ([SPR-HBM] and/or HBM). This metric does not
aggregate requests from other Logical Processors/Physical
Cores/sockets (see Uncore counters for that).. Improve data
accesses or interleave them with compute. Examples: 1) Data
layout re-structuring, 2) Software Prefetches (also through
the compiler).."""
class Local_MEM:
    """TMA level-5 node: cycles handling loads from local (same-socket) memory."""
    name = "Local_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.LLC_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_Local_DRAM_Cost(self, EV, 5) * LOAD_LCL_MEM(self, EV, 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Local_MEM zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from local memory. Caching will
improve the latency and increase performance."""
class Remote_MEM:
    """TMA level-5 node: cycles handling loads from remote-socket memory (NUMA)."""
    name = "Remote_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_Remote_DRAM_Cost(self, EV, 5) * LOAD_RMT_MEM(self, EV, 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_MEM zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from remote memory. This is
caused often due to non-optimal NUMA allocations."""
class Remote_Cache:
    """TMA level-5 node: cycles handling loads from remote caches in other sockets."""
    name = "Remote_Cache"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM:pp', 'MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore', 'Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_Remote_HitM_Cost(self, EV, 5) * LOAD_RMT_HITM(self, EV, 5) + Mem_Remote_Fwd_Cost(self, EV, 5) * LOAD_RMT_FWD(self, EV, 5)) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_Cache zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from remote cache in other
sockets including synchronizations issues. This is caused
often due to non-optimal NUMA allocations."""
class Store_Bound:
name = "Store_Bound"
domain = "Stalls"
area = "BE/Mem"
level = 3
htoff = False
sample = ['MEM_UOPS_RETIRED.ALL_STORES:pp']
errcount = 0
sibling = None
metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
maxval = None
def compute(self, EV):
try:
self.val = EV("RESOURCE_STALLS.SB", 3) / CLKS(self, EV, 3)
self.thresh = (self.val > 0.2) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Store_Bound zero division")
return self.val
desc = """
This metric estimates how often CPU was stalled due to RFO
store memory accesses; RFO store issue a read-for-ownership
request before the write. Even though store accesses do not
typically stall out-of-order CPUs; there are few cases where
stores can lead to actual stalls. This metric will be
flagged should RFO stores be a bottleneck."""
class Store_Latency:
name = "Store_Latency"
domain = "Clocks_Estimated"
area = "BE/Mem"
level = 4
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
maxval = 1.0
def compute(self, EV):
try:
self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
self.thresh = (self.val > 0.1) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Store_Latency zero division")
return self.val
desc = """
This metric estimates fraction of cycles the CPU spent
handling L1D store misses. Store accesses usually less
impact out-of-order core performance; however; holding
resources for longer time can lead into undesired
implications (e.g. contention on L1D fill-buffer entries -
see FB_Full). Consider to avoid/reduce unnecessary (or
easily load-able/computable) memory store."""
class False_Sharing:
name = "False_Sharing"
domain = "Clocks_Estimated"
area = "BE/Mem"
level = 4
htoff = False
sample = ['MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM:pp', 'OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE', 'OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM']
errcount = 0
sibling = None
metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
maxval = 1.0
def compute(self, EV):
try:
self.val = (Mem_Remote_HitM_Cost(self, EV, 4) * EV("OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM", 4) + Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE", 4)) / CLKS(self, EV, 4)
self.thresh = (self.val > 0.05) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "False_Sharing zero division")
return self.val
desc = """
This metric roughly estimates how often CPU was handling
synchronizations due to False Sharing. False Sharing is a
multithreading hiccup; where multiple Logical Processors
contend on different data-elements mapped into the same
cache line. . False Sharing can be easily avoided by padding
to make Logical Processors access different lines."""
class Split_Stores:
name = "Split_Stores"
domain = "Core_Utilization"
area = "BE/Mem"
level = 4
htoff = False
sample = ['MEM_UOPS_RETIRED.SPLIT_STORES:pp']
errcount = 0
sibling = None
metricgroup = frozenset([])
maxval = None
def compute(self, EV):
try:
self.val = 2 * EV("MEM_UOPS_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4)
self.thresh = (self.val > 0.2) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Split_Stores zero division")
return self.val
desc = """
This metric represents rate of split store accesses.
Consider aligning your data to the 64-byte cache line
granularity."""
class DTLB_Store:
name = "DTLB_Store"
domain = "Clocks_Estimated"
area = "BE/Mem"
level = 4
htoff = False
sample = ['MEM_UOPS_RETIRED.STLB_MISS_STORES:pp']
errcount = 0
sibling = None
metricgroup = frozenset(['BvMT', 'MemoryTLB'])
maxval = 1.0
def compute(self, EV):
try:
self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT", 4) + EV("DTLB_STORE_MISSES.WALK_DURATION", 4)) / CLKS(self, EV, 4)
self.thresh = (self.val > 0.05) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "DTLB_Store zero division")
return self.val
desc = """
This metric roughly estimates the fraction of cycles spent
handling first-level data TLB store misses. As with
ordinary data caching; focus on improving data locality and
reducing working-set size to reduce DTLB overhead.
Additionally; consider using profile-guided optimization
(PGO) to collocate frequently-used data on the same page.
Try using larger page sizes for large amounts of frequently-
used data."""
class Core_Bound:
name = "Core_Bound"
domain = "Slots"
area = "BE/Core"
level = 2
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
maxval = None
def compute(self, EV):
try:
self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV)
self.thresh = (self.val > 0.1) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Core_Bound zero division")
return self.val
desc = """
This metric represents fraction of slots where Core non-
memory issues were of a bottleneck. Shortage in hardware
compute resources; or dependencies in software's
instructions are both categorized under Core Bound. Hence it
may indicate the machine ran out of an out-of-order
resource; certain execution units are overloaded or
dependencies in program's data- or instruction-flow are
limiting the performance (e.g. FP-chained long-latency
arithmetic operations).. Tip: consider Port Saturation
analysis as next step."""
class Divider:
name = "Divider"
domain = "Clocks"
area = "BE/Core"
level = 3
htoff = False
sample = ['ARITH.FPU_DIV_ACTIVE']
errcount = 0
sibling = None
metricgroup = frozenset(['BvCB'])
maxval = 1.0
def compute(self, EV):
try:
self.val = EV("ARITH.FPU_DIV_ACTIVE", 3) / CORE_CLKS(self, EV, 3)
self.thresh = (self.val > 0.2) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Divider zero division")
return self.val
desc = """
This metric represents fraction of cycles where the Divider
unit was active. Divide and square root instructions are
performed by the Divider unit and can take considerably
longer latency than integer or Floating Point addition;
subtraction; or multiplication."""
class Ports_Utilization:
name = "Ports_Utilization"
domain = "Clocks"
area = "BE/Core"
level = 3
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['PortsUtil'])
maxval = None
def compute(self, EV):
try:
self.val = (Backend_Bound_Cycles(self, EV, 3) - EV("RESOURCE_STALLS.SB", 3) - STALLS_MEM_ANY(self, EV, 3)) / CLKS(self, EV, 3)
self.thresh = (self.val > 0.15) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Ports_Utilization zero division")
return self.val
desc = """
This metric estimates fraction of cycles the CPU performance
was potentially limited due to Core computation issues (non
divider-related). Two distinct categories can be attributed
into this metric: (1) heavy data-dependency among contiguous
instructions would manifest in this metric - such cases are
often referred to as low Instruction Level Parallelism
(ILP). (2) Contention on some hardware execution unit other
than Divider. For example; when there are too many multiply
operations.. Loop Vectorization -most compilers feature
auto-Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
class Ports_Utilized_0:
name = "Ports_Utilized_0"
domain = "Clocks"
area = "BE/Core"
level = 4
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['PortsUtil'])
maxval = None
def compute(self, EV):
try:
self.val = Cycles_0_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
self.thresh = (self.val > 0.2) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Ports_Utilized_0 zero division")
return self.val
desc = """
This metric represents fraction of cycles CPU executed no
uops on any execution port (Logical Processor cycles since
ICL, Physical Core cycles otherwise). Long-latency
instructions like divides may contribute to this metric..
Check assembly view and Appendix C in Optimization Manual to
find out instructions with say 5 or more cycles latency..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Ports_Utilized_1:
name = "Ports_Utilized_1"
domain = "Clocks"
area = "BE/Core"
level = 4
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['PortsUtil'])
maxval = None
def compute(self, EV):
try:
self.val = Cycles_1_Port_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
self.thresh = (self.val > 0.2) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Ports_Utilized_1 zero division")
return self.val
desc = """
This metric represents fraction of cycles where the CPU
executed total of 1 uop per cycle on all execution ports
(Logical Processor cycles since ICL, Physical Core cycles
otherwise). This can be due to heavy data-dependency among
software instructions; or over oversubscribing a particular
hardware resource. In some other cases with high
1_Port_Utilized and L1_Bound; this metric can point to L1
data-cache latency bottleneck that may not necessarily
manifest with complete execution starvation (due to the
short L1 latency e.g. walking a linked list) - looking at
the assembly can be helpful."""
class Ports_Utilized_2:
name = "Ports_Utilized_2"
domain = "Clocks"
area = "BE/Core"
level = 4
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['PortsUtil'])
maxval = None
def compute(self, EV):
try:
self.val = Cycles_2_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
self.thresh = (self.val > 0.15) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Ports_Utilized_2 zero division")
return self.val
desc = """
This metric represents fraction of cycles CPU executed total
of 2 uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise).
Loop Vectorization -most compilers feature auto-
Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
class Ports_Utilized_3m:
name = "Ports_Utilized_3m"
domain = "Clocks"
area = "BE/Core"
level = 4
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['BvCB', 'PortsUtil'])
maxval = None
def compute(self, EV):
try:
self.val = Cycles_3m_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
self.thresh = (self.val > 0.4) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Ports_Utilized_3m zero division")
return self.val
desc = """
This metric represents fraction of cycles CPU executed total
of 3 or more uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise)."""
class ALU_Op_Utilization:
name = "ALU_Op_Utilization"
domain = "Core_Execution"
area = "BE/Core"
level = 5
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset([])
maxval = None
def compute(self, EV):
try:
self.val = (EV("UOPS_DISPATCHED_PORT.PORT_0", 5) + EV("UOPS_DISPATCHED_PORT.PORT_1", 5) + EV("UOPS_DISPATCHED_PORT.PORT_5", 5)) / (3 * CORE_CLKS(self, EV, 5))
self.thresh = (self.val > 0.4)
except ZeroDivisionError:
handle_error(self, "ALU_Op_Utilization zero division")
return self.val
desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution ports for ALU operations."""
class Port_0:
name = "Port_0"
domain = "Core_Clocks"
area = "BE/Core"
level = 6
htoff = False
sample = ['UOPS_DISPATCHED_PORT.PORT_0']
errcount = 0
sibling = None
metricgroup = frozenset(['Compute'])
maxval = None
def compute(self, EV):
try:
self.val = EV("UOPS_DISPATCHED_PORT.PORT_0", 6) / CORE_CLKS(self, EV, 6)
self.thresh = (self.val > 0.6)
except ZeroDivisionError:
handle_error(self, "Port_0 zero division")
return self.val
desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 0 ALU"""
class Port_1:
name = "Port_1"
domain = "Core_Clocks"
area = "BE/Core"
level = 6
htoff = False
sample = ['UOPS_DISPATCHED_PORT.PORT_1']
errcount = 0
sibling = None
metricgroup = frozenset([])
maxval = None
def compute(self, EV):
try:
self.val = EV("UOPS_DISPATCHED_PORT.PORT_1", 6) / CORE_CLKS(self, EV, 6)
self.thresh = (self.val > 0.6)
except ZeroDivisionError:
handle_error(self, "Port_1 zero division")
return self.val
desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 1 (ALU)"""
class Port_5:
name = "Port_5"
domain = "Core_Clocks"
area = "BE/Core"
level = 6
htoff = False
sample = ['UOPS_DISPATCHED_PORT.PORT_5']
errcount = 0
sibling = None
metricgroup = frozenset([])
maxval = None
def compute(self, EV):
try:
self.val = EV("UOPS_DISPATCHED_PORT.PORT_5", 6) / CORE_CLKS(self, EV, 6)
self.thresh = (self.val > 0.6)
except ZeroDivisionError:
handle_error(self, "Port_5 zero division")
return self.val
desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 5 Branches and ALU. See
section 'Handling Port 5 Pressure' in Optimization Manual:.
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Load_Op_Utilization:
name = "Load_Op_Utilization"
domain = "Core_Execution"
area = "BE/Core"
level = 5
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset([])
maxval = None
def compute(self, EV):
try:
self.val = (EV("UOPS_DISPATCHED_PORT.PORT_2", 5) + EV("UOPS_DISPATCHED_PORT.PORT_3", 5) - EV("UOPS_DISPATCHED_PORT.PORT_4", 5)) / (2 * CORE_CLKS(self, EV, 5))
self.thresh = (self.val > 0.6)
except ZeroDivisionError:
handle_error(self, "Load_Op_Utilization zero division")
return self.val
desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Load operations"""
class Port_2:
name = "Port_2"
domain = "Core_Clocks"
area = "BE/Core"
level = 6
htoff = False
sample = ['UOPS_DISPATCHED_PORT.PORT_2']
errcount = 0
sibling = None
metricgroup = frozenset([])
maxval = None
def compute(self, EV):
try:
self.val = EV("UOPS_DISPATCHED_PORT.PORT_2", 6) / CORE_CLKS(self, EV, 6)
self.thresh = (self.val > 0.6)
except ZeroDivisionError:
handle_error(self, "Port_2 zero division")
return self.val
desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 2 Loads and Store-address"""
class Port_3:
name = "Port_3"
domain = "Core_Clocks"
area = "BE/Core"
level = 6
htoff = False
sample = ['UOPS_DISPATCHED_PORT.PORT_3']
errcount = 0
sibling = None
metricgroup = frozenset([])
maxval = None
def compute(self, EV):
try:
self.val = EV("UOPS_DISPATCHED_PORT.PORT_3", 6) / CORE_CLKS(self, EV, 6)
self.thresh = (self.val > 0.6)
except ZeroDivisionError:
handle_error(self, "Port_3 zero division")
return self.val
desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 3 Loads and Store-address"""
class Store_Op_Utilization:
name = "Store_Op_Utilization"
domain = "Core_Execution"
area = "BE/Core"
level = 5
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset([])
maxval = None
def compute(self, EV):
try:
self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 5) / CORE_CLKS(self, EV, 5)
self.thresh = (self.val > 0.6)
except ZeroDivisionError:
handle_error(self, "Store_Op_Utilization zero division")
return self.val
desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Store operations"""
class Port_4:
name = "Port_4"
domain = "Core_Clocks"
area = "BE/Core"
level = 6
htoff = False
sample = ['UOPS_DISPATCHED_PORT.PORT_4']
errcount = 0
sibling = None
metricgroup = frozenset([])
maxval = None
def compute(self, EV):
try:
self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 6) / CORE_CLKS(self, EV, 6)
self.thresh = (self.val > 0.6)
except ZeroDivisionError:
handle_error(self, "Port_4 zero division")
return self.val
desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 4 (Store-data)"""
class Retiring:
name = "Retiring"
domain = "Slots"
area = "RET"
level = 1
htoff = False
sample = ['UOPS_RETIRED.RETIRE_SLOTS']
errcount = 0
sibling = None
metricgroup = frozenset(['BvUW', 'TmaL1'])
maxval = None
def compute(self, EV):
try:
self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1)
self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
except ZeroDivisionError:
handle_error(self, "Retiring zero division")
return self.val
desc = """
This category represents fraction of slots utilized by
useful work i.e. issued uops that eventually get retired.
Ideally; all pipeline slots would be attributed to the
Retiring category. Retiring of 100% would indicate the
maximum Pipeline_Width throughput was achieved. Maximizing
Retiring typically increases the Instructions-per-cycle (see
IPC metric). Note that a high Retiring value does not
necessary mean there is no room for more performance. For
example; Heavy-operations or Microcode Assists are
categorized under Retiring. They often indicate suboptimal
performance and can often be optimized or avoided. . A high
Retiring value for non-vectorized code may be a good hint
for programmer to consider vectorizing his code. Doing so
essentially lets more computations be done without
significantly increasing number of instructions thus
improving the performance."""
class Light_Operations:
name = "Light_Operations"
domain = "Slots"
area = "RET"
level = 2
htoff = False
sample = ['INST_RETIRED.PREC_DIST']
errcount = 0
sibling = None
metricgroup = frozenset(['Retire', 'TmaL2'])
maxval = None
def compute(self, EV):
try:
self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV)
self.thresh = (self.val > 0.6)
except ZeroDivisionError:
handle_error(self, "Light_Operations zero division")
return self.val
desc = """
This metric represents fraction of slots where the CPU was
retiring light-weight operations -- instructions that
require no more than one uop (micro-operation). This
correlates with total number of instructions used by the
program. A uops-per-instruction (see UopPI metric) ratio of
1 or less should be expected for decently optimized code
running on Intel Core/Xeon products. While this often
indicates efficient X86 instructions were executed; high
value does not necessarily mean better performance cannot be
achieved. . Focus on techniques that reduce instruction
count or result in more efficient instructions generation
such as vectorization."""
class FP_Arith:
name = "FP_Arith"
domain = "Uops"
area = "RET"
level = 3
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['HPC'])
maxval = None
def compute(self, EV):
try:
self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
self.thresh = (self.val > 0.2) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "FP_Arith zero division")
return self.val
desc = """
This metric represents overall arithmetic floating-point
(FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use of
\"Uops\" CountDomain and FMA double-counting."""
class X87_Use:
name = "X87_Use"
domain = "Uops"
area = "RET"
level = 4
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['Compute'])
maxval = None
def compute(self, EV):
try:
self.val = Retired_Slots(self, EV, 4) * EV("FP_COMP_OPS_EXE.X87", 4) / EV("UOPS_EXECUTED.THREAD", 4)
self.thresh = (self.val > 0.1) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "X87_Use zero division")
return self.val
desc = """
This metric serves as an approximation of legacy x87 usage.
It accounts for instructions beyond X87 FP arithmetic
operations; hence may be used as a thermometer to avoid X87
high usage and preferably upgrade to modern ISA. See Tip
under Tuning Hint.. Tip: consider compiler flags to generate
newer AVX (or SSE) instruction sets; which typically perform
better and feature vectors."""
class FP_Scalar:
name = "FP_Scalar"
domain = "Uops"
area = "RET"
level = 4
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['Compute', 'Flops'])
maxval = None
def compute(self, EV):
try:
self.val = FP_Arith_Scalar(self, EV, 4) / EV("UOPS_EXECUTED.THREAD", 4)
self.thresh = (self.val > 0.1) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "FP_Scalar zero division")
return self.val
desc = """
This metric approximates arithmetic floating-point (FP)
scalar uops fraction the CPU has retired. May overcount due
to FMA double counting.. Investigate what limits (compiler)
generation of vector code."""
class FP_Vector:
name = "FP_Vector"
domain = "Uops"
area = "RET"
level = 4
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['Compute', 'Flops'])
maxval = 1.0
def compute(self, EV):
try:
self.val = FP_Arith_Vector(self, EV, 4) / EV("UOPS_EXECUTED.THREAD", 4)
self.thresh = (self.val > 0.1) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "FP_Vector zero division")
return self.val
desc = """
This metric approximates arithmetic floating-point (FP)
vector uops fraction the CPU has retired aggregated across
all vector widths. May overcount due to FMA double
counting.. Check if vector width is expected"""
class FP_Vector_128b:
name = "FP_Vector_128b"
domain = "Uops"
area = "RET"
level = 5
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['Compute', 'Flops'])
maxval = 1.0
def compute(self, EV):
try:
self.val = (EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", 5) + EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", 5)) / EV("UOPS_EXECUTED.THREAD", 5)
self.thresh = (self.val > 0.1) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "FP_Vector_128b zero division")
return self.val
desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 128-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class FP_Vector_256b:
name = "FP_Vector_256b"
domain = "Uops"
area = "RET"
level = 5
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['Compute', 'Flops'])
maxval = 1.0
def compute(self, EV):
try:
self.val = (EV("SIMD_FP_256.PACKED_DOUBLE", 5) + EV("SIMD_FP_256.PACKED_SINGLE", 5)) / EV("UOPS_EXECUTED.THREAD", 5)
self.thresh = (self.val > 0.1) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "FP_Vector_256b zero division")
return self.val
desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class Heavy_Operations:
name = "Heavy_Operations"
domain = "Slots"
area = "RET"
level = 2
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['Retire', 'TmaL2'])
maxval = None
def compute(self, EV):
try:
self.val = self.Microcode_Sequencer.compute(EV)
self.thresh = (self.val > 0.1)
except ZeroDivisionError:
handle_error(self, "Heavy_Operations zero division")
return self.val
desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that
require two or more uops or micro-coded sequences. This
highly-correlates with the uop length of these
instructions/sequences."""
class Microcode_Sequencer:
name = "Microcode_Sequencer"
domain = "Slots"
area = "RET"
level = 3
htoff = False
sample = ['IDQ.MS_UOPS']
errcount = 0
sibling = None
metricgroup = frozenset(['MicroSeq'])
maxval = None
def compute(self, EV):
try:
self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3)
self.thresh = (self.val > 0.05) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Microcode_Sequencer zero division")
return self.val
desc = """
This metric represents fraction of slots the CPU was
retiring uops fetched by the Microcode Sequencer (MS) unit.
The MS is used for CISC instructions not supported by the
default decoders (like repeat move strings; or CPUID); or by
microcode assists used to address some operation modes (like
in Floating Point assists). These cases can often be
avoided.."""
class Assists:
name = "Assists"
domain = "Slots_Estimated"
area = "RET"
level = 4
htoff = False
sample = ['OTHER_ASSISTS.ANY_WB_ASSIST']
errcount = 0
sibling = None
metricgroup = frozenset(['BvIO'])
maxval = 1.0
def compute(self, EV):
try:
self.val = Avg_Assist_Cost * EV("OTHER_ASSISTS.ANY_WB_ASSIST", 4) / SLOTS(self, EV, 4)
self.thresh = (self.val > 0.1) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Assists zero division")
return self.val
desc = """
This metric estimates fraction of slots the CPU retired uops
delivered by the Microcode_Sequencer as a result of Assists.
Assists are long sequences of uops that are required in
certain corner-cases for operations that cannot be handled
natively by the execution pipeline. For example; when
working with very small floating point values (so-called
Denormals); the FP units are not set up to perform these
operations natively. Instead; a sequence of instructions to
perform the computation on the Denormals is injected into
the pipeline. Since these microcode sequences might be
dozens of uops long; Assists can be extremely deleterious to
performance and they can be avoided in many cases."""
class CISC:
name = "CISC"
domain = "Slots"
area = "RET"
level = 4
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset([])
maxval = None
def compute(self, EV):
try:
self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV))
self.thresh = (self.val > 0.1) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "CISC zero division")
return self.val
desc = """
This metric estimates fraction of cycles the CPU retired
uops originated from CISC (complex instruction set computer)
instruction. A CISC instruction has multiple uops that are
required to perform the instruction's functionality as in
the case of read-modify-write as an example. Since these
instructions require multiple uops they may or may not imply
sub-optimal use of machine resources."""
class Metric_IPC:
name = "IPC"
domain = "Metric"
maxval = Pipeline_Width + 2
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['Ret', 'Summary'])
sibling = None
def compute(self, EV):
try:
self.val = IPC(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "IPC zero division")
desc = """
Instructions Per Cycle (per Logical Processor)"""
class Metric_UopPI:
name = "UopPI"
domain = "Metric"
maxval = 2.0
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
sibling = None
def compute(self, EV):
try:
self.val = UopPI(self, EV, 0)
self.thresh = (self.val > 1.05)
except ZeroDivisionError:
handle_error_metric(self, "UopPI zero division")
desc = """
Uops Per Instruction"""
class Metric_UpTB:
name = "UpTB"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['Branches', 'Fed', 'FetchBW'])
sibling = None
def compute(self, EV):
try:
self.val = UpTB(self, EV, 0)
self.thresh = self.val < Pipeline_Width * 1.5
except ZeroDivisionError:
handle_error_metric(self, "UpTB zero division")
desc = """
Uops per taken branch"""
class Metric_CPI:
name = "CPI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['Pipeline', 'Mem'])
sibling = None
def compute(self, EV):
try:
self.val = CPI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CPI zero division")
desc = """
Cycles Per Instruction (per Logical Processor)"""
class Metric_CLKS:
name = "CLKS"
domain = "Count"
maxval = 0
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['Pipeline'])
sibling = None
def compute(self, EV):
try:
self.val = CLKS(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CLKS zero division")
desc = """
Per-Logical Processor actual clocks when the Logical
Processor is active."""
class Metric_SLOTS:
name = "SLOTS"
domain = "Count"
maxval = 0
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['TmaL1'])
sibling = None
def compute(self, EV):
try:
self.val = SLOTS(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "SLOTS zero division")
desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""
class Metric_Execute_per_Issue:
name = "Execute_per_Issue"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['Cor', 'Pipeline'])
sibling = None
def compute(self, EV):
try:
self.val = Execute_per_Issue(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Execute_per_Issue zero division")
desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests
high rate of uop micro-fusions. Ratio < 1 suggest high rate
of \"execute\" at rename stage."""
class Metric_CoreIPC:
name = "CoreIPC"
domain = "Core_Metric"
maxval = Pipeline_Width + 2
errcount = 0
area = "Info.Core"
metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
sibling = None
def compute(self, EV):
try:
self.val = CoreIPC(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CoreIPC zero division")
desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""
class Metric_FLOPc:
name = "FLOPc"
domain = "Core_Metric"
maxval = 10.0
errcount = 0
area = "Info.Core"
metricgroup = frozenset(['Ret', 'Flops'])
sibling = None
def compute(self, EV):
try:
self.val = FLOPc(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "FLOPc zero division")
desc = """
Floating Point Operations Per Cycle"""
class Metric_ILP:
name = "ILP"
domain = "Metric"
maxval = Exe_Ports
errcount = 0
area = "Info.Core"
metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil'])
sibling = None
def compute(self, EV):
try:
self.val = ILP(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "ILP zero division")
desc = """
Instruction-Level-Parallelism (average number of uops
executed when there is execution) per thread (logical-
processor)"""
class Metric_CORE_CLKS:
name = "CORE_CLKS"
domain = "Count"
maxval = 0
errcount = 0
area = "Info.Core"
metricgroup = frozenset(['SMT'])
sibling = None
def compute(self, EV):
try:
self.val = CORE_CLKS(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CORE_CLKS zero division")
desc = """
Core actual clocks when any Logical Processor is active on
the Physical Core"""
class Metric_IpLoad:
name = "IpLoad"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['InsType'])
sibling = None
def compute(self, EV):
try:
self.val = IpLoad(self, EV, 0)
self.thresh = (self.val < 3)
except ZeroDivisionError:
handle_error_metric(self, "IpLoad zero division")
desc = """
Instructions per Load (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
class Metric_IpStore:
name = "IpStore"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['InsType'])
sibling = None
def compute(self, EV):
try:
self.val = IpStore(self, EV, 0)
self.thresh = (self.val < 8)
except ZeroDivisionError:
handle_error_metric(self, "IpStore zero division")
desc = """
Instructions per Store (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
class Metric_IpBranch:
name = "IpBranch"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['Branches', 'Fed', 'InsType'])
sibling = None
def compute(self, EV):
try:
self.val = IpBranch(self, EV, 0)
self.thresh = (self.val < 8)
except ZeroDivisionError:
handle_error_metric(self, "IpBranch zero division")
desc = """
Instructions per Branch (lower number means higher
occurrence rate)"""
class Metric_IpCall:
name = "IpCall"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
sibling = None
def compute(self, EV):
try:
self.val = IpCall(self, EV, 0)
self.thresh = (self.val < 200)
except ZeroDivisionError:
handle_error_metric(self, "IpCall zero division")
desc = """
Instructions per (near) call (lower number means higher
occurrence rate)"""
class Metric_IpTB:
name = "IpTB"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO'])
sibling = None
def compute(self, EV):
try:
self.val = IpTB(self, EV, 0)
self.thresh = self.val < Pipeline_Width * 2 + 1
except ZeroDivisionError:
handle_error_metric(self, "IpTB zero division")
desc = """
Instructions per taken branch"""
class Metric_BpTkBranch:
name = "BpTkBranch"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
sibling = None
def compute(self, EV):
try:
self.val = BpTkBranch(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "BpTkBranch zero division")
desc = """
Branch instructions per taken branch. . Can be used to
approximate PGO-likelihood for non-loopy codes."""
class Metric_IpArith:
name = "IpArith"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['Flops', 'InsType'])
sibling = None
def compute(self, EV):
try:
self.val = IpArith(self, EV, 0)
self.thresh = (self.val < 10)
except ZeroDivisionError:
handle_error_metric(self, "IpArith zero division")
desc = """
Instructions per FP Arithmetic instruction (lower number
means higher occurrence rate). Values < 1 are possible due
to intentional FMA double counting. Approximated prior to
BDW."""
class Metric_Instructions:
name = "Instructions"
domain = "Count"
maxval = 0
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['Summary', 'TmaL1'])
sibling = None
def compute(self, EV):
try:
self.val = Instructions(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Instructions zero division")
desc = """
Total number of retired Instructions"""
class Metric_Retire:
name = "Retire"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Pipeline"
metricgroup = frozenset(['Pipeline', 'Ret'])
sibling = None
def compute(self, EV):
try:
self.val = Retire(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Retire zero division")
desc = """
Average number of Uops retired in cycles where at least one
uop has retired."""
class Metric_Execute:
name = "Execute"
domain = "Metric"
maxval = Exe_Ports
errcount = 0
area = "Info.Pipeline"
metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT'])
sibling = None
def compute(self, EV):
try:
self.val = Execute(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Execute zero division")
desc = """
"""
class Metric_DSB_Coverage:
    # Machine-generated TMA metric descriptor; same template as the other
    # Metric_* classes in this file.
    name = "DSB_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSB', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Coverage(self, EV, 0)
            # Flag only when the DSB delivers under 70% of uops AND the
            # workload is already high-IPC (per the HighIPC model helper).
            self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Coverage zero division")
    desc = """
Fraction of Uops delivered by the DSB (aka Decoded ICache;
or Uop Cache). See section 'Decoded ICache' in Optimization
Manual. http://www.intel.com/content/www/us/en/architecture-
and-technology/64-ia-32-architectures-optimization-
manual.html"""
class Metric_IpUnknown_Branch:
name = "IpUnknown_Branch"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Frontend"
metricgroup = frozenset(['Fed'])
sibling = None
def compute(self, EV):
try:
self.val = IpUnknown_Branch(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "IpUnknown_Branch zero division")
desc = """
Instructions per speculative Unknown Branch Misprediction
(BAClear) (lower number means higher occurrence rate)"""
class Metric_IpMispredict:
name = "IpMispredict"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Bad_Spec"
metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts'])
sibling = None
def compute(self, EV):
try:
self.val = IpMispredict(self, EV, 0)
self.thresh = (self.val < 200)
except ZeroDivisionError:
handle_error_metric(self, "IpMispredict zero division")
desc = """
Number of Instructions per non-speculative Branch
Misprediction (JEClear) (lower number means higher
occurrence rate)"""
class Metric_IpMisp_Indirect:
name = "IpMisp_Indirect"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Bad_Spec"
metricgroup = frozenset(['Bad', 'BrMispredicts'])
sibling = None
def compute(self, EV):
try:
self.val = IpMisp_Indirect(self, EV, 0)
self.thresh = (self.val < 1000)
except ZeroDivisionError:
handle_error_metric(self, "IpMisp_Indirect zero division")
desc = """
Instructions per retired Mispredicts for indirect CALL or
JMP branches (lower number means higher occurrence rate)."""
class Metric_Load_Miss_Real_Latency:
name = "Load_Miss_Real_Latency"
domain = "Clocks_Latency"
maxval = 1000
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat'])
sibling = None
def compute(self, EV):
try:
self.val = Load_Miss_Real_Latency(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Load_Miss_Real_Latency zero division")
desc = """
Actual Average Latency for L1 data-cache miss demand load
operations (in core cycles)"""
class Metric_MLP:
name = "MLP"
domain = "Metric"
maxval = 10.0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = MLP(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "MLP zero division")
desc = """
Memory-Level-Parallelism (average number of L1 miss demand
load when there is at least one such miss. Per-Logical
Processor)"""
class Metric_L1MPKI:
name = "L1MPKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['CacheHits', 'Mem'])
sibling = None
def compute(self, EV):
try:
self.val = L1MPKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L1MPKI zero division")
desc = """
L1 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L2MPKI:
name = "L2MPKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'Backend', 'CacheHits'])
sibling = None
def compute(self, EV):
try:
self.val = L2MPKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2MPKI zero division")
desc = """
L2 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L2MPKI_RFO:
name = "L2MPKI_RFO"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['CacheMisses', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = L2MPKI_RFO(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2MPKI_RFO zero division")
desc = """
Offcore requests (L2 cache miss) per kilo instruction for
demand RFOs"""
class Metric_L3MPKI:
name = "L3MPKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem'])
sibling = None
def compute(self, EV):
try:
self.val = L3MPKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L3MPKI zero division")
desc = """
L3 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L1D_Cache_Fill_BW:
name = "L1D_Cache_Fill_BW"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L1D_Cache_Fill_BW(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L1D_Cache_Fill_BW zero division")
desc = """
"""
class Metric_L2_Cache_Fill_BW:
name = "L2_Cache_Fill_BW"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L2_Cache_Fill_BW(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2_Cache_Fill_BW zero division")
desc = """
"""
class Metric_L3_Cache_Fill_BW:
name = "L3_Cache_Fill_BW"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L3_Cache_Fill_BW(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L3_Cache_Fill_BW zero division")
desc = """
"""
class Metric_Page_Walks_Utilization:
name = "Page_Walks_Utilization"
domain = "Core_Metric"
maxval = 1.0
errcount = 0
area = "Info.Memory.TLB"
metricgroup = frozenset(['Mem', 'MemoryTLB'])
sibling = None
def compute(self, EV):
try:
self.val = Page_Walks_Utilization(self, EV, 0)
self.thresh = (self.val > 0.5)
except ZeroDivisionError:
handle_error_metric(self, "Page_Walks_Utilization zero division")
desc = """
Utilization of the core's Page Walker(s) serving STLB misses
triggered by instruction/Load/Store accesses"""
class Metric_L1D_Cache_Fill_BW_2T:
name = "L1D_Cache_Fill_BW_2T"
domain = "Core_Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Core"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L1D_Cache_Fill_BW_2T(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division")
desc = """
Average per-core data fill bandwidth to the L1 data cache
[GB / sec]"""
class Metric_L2_Cache_Fill_BW_2T:
name = "L2_Cache_Fill_BW_2T"
domain = "Core_Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Core"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L2_Cache_Fill_BW_2T(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division")
desc = """
Average per-core data fill bandwidth to the L2 cache [GB /
sec]"""
class Metric_L3_Cache_Fill_BW_2T:
name = "L3_Cache_Fill_BW_2T"
domain = "Core_Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Core"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L3_Cache_Fill_BW_2T(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division")
desc = """
Average per-core data fill bandwidth to the L3 cache [GB /
sec]"""
class Metric_Load_L2_Miss_Latency:
name = "Load_L2_Miss_Latency"
domain = "Clocks_Latency"
maxval = 1000
errcount = 0
area = "Info.Memory.Latency"
metricgroup = frozenset(['Memory_Lat', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = Load_L2_Miss_Latency(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Load_L2_Miss_Latency zero division")
desc = """
Average Latency for L2 cache miss demand Loads"""
class Metric_Load_L2_MLP:
name = "Load_L2_MLP"
domain = "Metric"
maxval = 100
errcount = 0
area = "Info.Memory.Latency"
metricgroup = frozenset(['Memory_BW', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = Load_L2_MLP(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Load_L2_MLP zero division")
desc = """
Average Parallel L2 cache miss demand Loads"""
class Metric_Data_L2_MLP:
name = "Data_L2_MLP"
domain = "Metric"
maxval = 100
errcount = 0
area = "Info.Memory.Latency"
metricgroup = frozenset(['Memory_BW', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = Data_L2_MLP(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Data_L2_MLP zero division")
desc = """
Average Parallel L2 cache miss data reads"""
class Metric_CPU_Utilization:
name = "CPU_Utilization"
domain = "Metric"
maxval = 1
errcount = 0
area = "Info.System"
metricgroup = frozenset(['HPC', 'Summary'])
sibling = None
def compute(self, EV):
try:
self.val = CPU_Utilization(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CPU_Utilization zero division")
desc = """
Average CPU Utilization (percentage)"""
class Metric_CPUs_Utilized:
name = "CPUs_Utilized"
domain = "Metric"
maxval = 300
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Summary'])
sibling = None
def compute(self, EV):
try:
self.val = CPUs_Utilized(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CPUs_Utilized zero division")
desc = """
Average number of utilized CPUs"""
class Metric_Core_Frequency:
name = "Core_Frequency"
domain = "SystemMetric"
maxval = 0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Summary', 'Power'])
sibling = None
def compute(self, EV):
try:
self.val = Core_Frequency(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Core_Frequency zero division")
desc = """
Measured Average Core Frequency for unhalted processors
[GHz]"""
class Metric_Uncore_Frequency:
name = "Uncore_Frequency"
domain = "SystemMetric"
maxval = 0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['SoC'])
sibling = None
def compute(self, EV):
try:
self.val = Uncore_Frequency(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Uncore_Frequency zero division")
desc = """
Measured Average Uncore Frequency for the SoC [GHz]"""
class Metric_GFLOPs:
name = "GFLOPs"
domain = "Metric"
maxval = 200
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
sibling = None
def compute(self, EV):
try:
self.val = GFLOPs(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "GFLOPs zero division")
desc = """
Giga Floating Point Operations Per Second. Aggregate across
all supported options of: FP precisions, scalar and vector
instructions, vector-width"""
class Metric_Turbo_Utilization:
name = "Turbo_Utilization"
domain = "Core_Metric"
maxval = 10.0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Power'])
sibling = None
def compute(self, EV):
try:
self.val = Turbo_Utilization(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Turbo_Utilization zero division")
desc = """
Average Frequency Utilization relative nominal frequency"""
class Metric_SMT_2T_Utilization:
name = "SMT_2T_Utilization"
domain = "Core_Metric"
maxval = 1.0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['SMT'])
sibling = None
def compute(self, EV):
try:
self.val = SMT_2T_Utilization(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "SMT_2T_Utilization zero division")
desc = """
Fraction of cycles where both hardware Logical Processors
were active"""
class Metric_Kernel_Utilization:
name = "Kernel_Utilization"
domain = "Metric"
maxval = 1.0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['OS'])
sibling = None
def compute(self, EV):
try:
self.val = Kernel_Utilization(self, EV, 0)
self.thresh = (self.val > 0.05)
except ZeroDivisionError:
handle_error_metric(self, "Kernel_Utilization zero division")
desc = """
Fraction of cycles spent in the Operating System (OS) Kernel
mode"""
class Metric_Kernel_CPI:
name = "Kernel_CPI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['OS'])
sibling = None
def compute(self, EV):
try:
self.val = Kernel_CPI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Kernel_CPI zero division")
desc = """
Cycles Per Instruction for the Operating System (OS) Kernel
mode"""
class Metric_DRAM_BW_Use:
name = "DRAM_BW_Use"
domain = "GB/sec"
maxval = 200
errcount = 0
area = "Info.System"
metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC'])
sibling = None
def compute(self, EV):
try:
self.val = DRAM_BW_Use(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "DRAM_BW_Use zero division")
desc = """
Average external Memory Bandwidth Use for reads and writes
[GB / sec]"""
class Metric_MEM_Read_Latency:
name = "MEM_Read_Latency"
domain = "NanoSeconds"
maxval = 1000
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Mem', 'MemoryLat', 'SoC'])
sibling = None
def compute(self, EV):
try:
self.val = MEM_Read_Latency(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "MEM_Read_Latency zero division")
desc = """
Average latency of data read request to external memory (in
nanoseconds). Accounts for demand loads and L1/L2
prefetches."""
class Metric_MEM_Parallel_Reads:
name = "MEM_Parallel_Reads"
domain = "SystemMetric"
maxval = 100
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Mem', 'MemoryBW', 'SoC'])
sibling = None
def compute(self, EV):
try:
self.val = MEM_Parallel_Reads(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "MEM_Parallel_Reads zero division")
desc = """
Average number of parallel data read requests to external
memory. Accounts for demand loads and L1/L2 prefetches"""
class Metric_Time:
name = "Time"
domain = "Seconds"
maxval = 0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Summary'])
sibling = None
def compute(self, EV):
try:
self.val = Time(self, EV, 0)
self.thresh = (self.val < 1)
except ZeroDivisionError:
handle_error_metric(self, "Time zero division")
desc = """
Run duration time in seconds"""
class Metric_Socket_CLKS:
name = "Socket_CLKS"
domain = "Count"
maxval = 0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['SoC'])
sibling = None
def compute(self, EV):
try:
self.val = Socket_CLKS(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Socket_CLKS zero division")
desc = """
Socket actual clocks when any core is active on that socket"""
class Metric_IpFarBranch:
name = "IpFarBranch"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Branches', 'OS'])
sibling = None
def compute(self, EV):
try:
self.val = IpFarBranch(self, EV, 0)
self.thresh = (self.val < 1000000)
except ZeroDivisionError:
handle_error_metric(self, "IpFarBranch zero division")
desc = """
Instructions per Far Branch ( Far Branches apply upon
transition from application to operating system, handling
interrupts, exceptions) [lower number means higher
occurrence rate]"""
# Schedule
class Setup:
def __init__(self, r):
o = dict()
n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n
n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n
n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n
n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n
n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n
n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n
n = LCP() ; r.run(n) ; o["LCP"] = n
n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n
n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n
n = MITE() ; r.run(n) ; o["MITE"] = n
n = DSB() ; r.run(n) ; o["DSB"] = n
n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n
n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n
n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n
n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n
n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n
n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n
n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n
n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n
n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n
n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n
n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n
n = FB_Full() ; r.run(n) ; o["FB_Full"] = n
n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n
n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n
n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n
n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n
n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n
n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n
n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n
n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n
n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n
n = Local_MEM() ; r.run(n) ; o["Local_MEM"] = n
n = Remote_MEM() ; r.run(n) ; o["Remote_MEM"] = n
n = Remote_Cache() ; r.run(n) ; o["Remote_Cache"] = n
n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n
n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n
n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n
n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n
n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n
n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n
n = Divider() ; r.run(n) ; o["Divider"] = n
n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n
n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n
n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n
n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n
n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n
n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n
n = Port_0() ; r.run(n) ; o["Port_0"] = n
n = Port_1() ; r.run(n) ; o["Port_1"] = n
n = Port_5() ; r.run(n) ; o["Port_5"] = n
n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n
n = Port_2() ; r.run(n) ; o["Port_2"] = n
n = Port_3() ; r.run(n) ; o["Port_3"] = n
n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n
n = Port_4() ; r.run(n) ; o["Port_4"] = n
n = Retiring() ; r.run(n) ; o["Retiring"] = n
n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n
n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n
n = X87_Use() ; r.run(n) ; o["X87_Use"] = n
n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n
n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n
n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n
n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n
n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n
n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n
n = Assists() ; r.run(n) ; o["Assists"] = n
n = CISC() ; r.run(n) ; o["CISC"] = n
# parents
o["Fetch_Latency"].parent = o["Frontend_Bound"]
o["ICache_Misses"].parent = o["Fetch_Latency"]
o["ITLB_Misses"].parent = o["Fetch_Latency"]
o["Branch_Resteers"].parent = o["Fetch_Latency"]
o["MS_Switches"].parent = o["Fetch_Latency"]
o["LCP"].parent = o["Fetch_Latency"]
o["DSB_Switches"].parent = o["Fetch_Latency"]
o["Fetch_Bandwidth"].parent = o["Frontend_Bound"]
o["MITE"].parent = o["Fetch_Bandwidth"]
o["DSB"].parent = o["Fetch_Bandwidth"]
o["Branch_Mispredicts"].parent = o["Bad_Speculation"]
o["Machine_Clears"].parent = o["Bad_Speculation"]
o["Memory_Bound"].parent = o["Backend_Bound"]
o["L1_Bound"].parent = o["Memory_Bound"]
o["DTLB_Load"].parent = o["L1_Bound"]
o["Store_Fwd_Blk"].parent = o["L1_Bound"]
o["Lock_Latency"].parent = o["L1_Bound"]
o["Split_Loads"].parent = o["L1_Bound"]
o["G4K_Aliasing"].parent = o["L1_Bound"]
o["FB_Full"].parent = o["L1_Bound"]
o["L2_Bound"].parent = o["Memory_Bound"]
o["L3_Bound"].parent = o["Memory_Bound"]
o["Contested_Accesses"].parent = o["L3_Bound"]
o["Data_Sharing"].parent = o["L3_Bound"]
o["L3_Hit_Latency"].parent = o["L3_Bound"]
o["SQ_Full"].parent = o["L3_Bound"]
o["DRAM_Bound"].parent = o["Memory_Bound"]
o["MEM_Bandwidth"].parent = o["DRAM_Bound"]
o["MEM_Latency"].parent = o["DRAM_Bound"]
o["Local_MEM"].parent = o["MEM_Latency"]
o["Remote_MEM"].parent = o["MEM_Latency"]
o["Remote_Cache"].parent = o["MEM_Latency"]
o["Store_Bound"].parent = o["Memory_Bound"]
o["Store_Latency"].parent = o["Store_Bound"]
o["False_Sharing"].parent = o["Store_Bound"]
o["Split_Stores"].parent = o["Store_Bound"]
o["DTLB_Store"].parent = o["Store_Bound"]
o["Core_Bound"].parent = o["Backend_Bound"]
o["Divider"].parent = o["Core_Bound"]
o["Ports_Utilization"].parent = o["Core_Bound"]
o["Ports_Utilized_0"].parent = o["Ports_Utilization"]
o["Ports_Utilized_1"].parent = o["Ports_Utilization"]
o["Ports_Utilized_2"].parent = o["Ports_Utilization"]
o["Ports_Utilized_3m"].parent = o["Ports_Utilization"]
o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Port_0"].parent = o["ALU_Op_Utilization"]
o["Port_1"].parent = o["ALU_Op_Utilization"]
o["Port_5"].parent = o["ALU_Op_Utilization"]
o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Port_2"].parent = o["Load_Op_Utilization"]
o["Port_3"].parent = o["Load_Op_Utilization"]
o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Port_4"].parent = o["Store_Op_Utilization"]
o["Light_Operations"].parent = o["Retiring"]
o["FP_Arith"].parent = o["Light_Operations"]
o["X87_Use"].parent = o["FP_Arith"]
o["FP_Scalar"].parent = o["FP_Arith"]
o["FP_Vector"].parent = o["FP_Arith"]
o["FP_Vector_128b"].parent = o["FP_Vector"]
o["FP_Vector_256b"].parent = o["FP_Vector"]
o["Heavy_Operations"].parent = o["Retiring"]
o["Microcode_Sequencer"].parent = o["Heavy_Operations"]
o["Assists"].parent = o["Microcode_Sequencer"]
o["CISC"].parent = o["Microcode_Sequencer"]
# user visible metrics
n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n
n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n
n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n
n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n
n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n
n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n
n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n
n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n
n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n
n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n
n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n
n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n
n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n
n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n
n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n
n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n
n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n
n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n
n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n
n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n
n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n
n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n
n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n
n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n
n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n
n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n
n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n
n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n
n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n
n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n
n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n
n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n
n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n
n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n
n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n
n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n
n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n
n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n
n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n
n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n
n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n
n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n
n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n
n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n
n = Metric_Uncore_Frequency() ; r.metric(n) ; o["Uncore_Frequency"] = n
n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n
n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n
n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n
n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n
n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n
n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n
n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n
n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n
n = Metric_Time() ; r.metric(n) ; o["Time"] = n
n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n
n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n
# references between groups
o["ICache_Misses"].ITLB_Misses = o["ITLB_Misses"]
o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"]
o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"]
o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"]
o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"]
o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Backend_Bound"].Retiring = o["Retiring"]
o["Backend_Bound"].Bad_Speculation = o["Bad_Speculation"]
o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"]
o["Memory_Bound"].Retiring = o["Retiring"]
o["Memory_Bound"].Bad_Speculation = o["Bad_Speculation"]
o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"]
o["Memory_Bound"].Backend_Bound = o["Backend_Bound"]
o["Memory_Bound"].Fetch_Latency = o["Fetch_Latency"]
o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Core_Bound"].Retiring = o["Retiring"]
o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"]
o["Core_Bound"].Memory_Bound = o["Memory_Bound"]
o["Core_Bound"].Backend_Bound = o["Backend_Bound"]
o["Core_Bound"].Bad_Speculation = o["Bad_Speculation"]
o["Core_Bound"].Fetch_Latency = o["Fetch_Latency"]
o["Ports_Utilization"].Fetch_Latency = o["Fetch_Latency"]
o["Ports_Utilized_0"].Fetch_Latency = o["Fetch_Latency"]
o["Retiring"].Heavy_Operations = o["Heavy_Operations"]
o["Light_Operations"].Retiring = o["Retiring"]
o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"]
o["Light_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["FP_Arith"].FP_Scalar = o["FP_Scalar"]
o["FP_Arith"].X87_Use = o["X87_Use"]
o["FP_Arith"].FP_Vector = o["FP_Vector"]
o["Heavy_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["CISC"].Assists = o["Assists"]
o["IpArith"].FP_Vector = o["FP_Vector"]
o["IpArith"].FP_Scalar = o["FP_Scalar"]
# siblings cross-tree
o["MS_Switches"].sibling = (o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],)
o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],)
o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],)
o["Machine_Clears"].sibling = (o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"], o["Microcode_Sequencer"],)
o["L1_Bound"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],)
o["DTLB_Load"].sibling = (o["DTLB_Store"],)
o["Lock_Latency"].sibling = (o["Store_Latency"],)
o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"],)
o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"],)
o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Remote_Cache"], o["False_Sharing"],)
o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],)
o["L3_Hit_Latency"].overlap = True
o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],)
o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],)
o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],)
o["Remote_Cache"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"],)
o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],)
o["Store_Latency"].overlap = True
o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"],)
o["Split_Stores"].sibling = (o["Port_4"],)
o["DTLB_Store"].sibling = (o["DTLB_Load"],)
o["Ports_Utilized_1"].sibling = (o["L1_Bound"],)
o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["Port_4"].sibling = (o["Split_Stores"],)
o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"],)
o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"],)
o["Microcode_Sequencer"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],)
o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
| 133,732 | Python | .py | 3,304 | 34.814165 | 306 | 0.656987 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,879 | pci.py | andikleen_pmu-tools/pci.py | #!/usr/bin/env python3
# library and tool to access PCI config space
import os
import struct
# no multiple domains, controllers so far
def probe(bus, dev, func):
    """Return True if PCI device bus:dev.func exposes a config file in sysfs."""
    cfg = f"/sys/devices/pci0000:{bus:02x}/0000:{bus:02x}:{dev:02x}.{func:01x}/config"
    return os.path.isfile(cfg)
def openpci(bus, dev, func, offset, mode):
    """Open the sysfs config file of bus:dev.func with the given os.O_* mode
    and position the descriptor at offset. Caller must os.close() it."""
    cfg = f"/sys/devices/pci0000:{bus:02x}/0000:{bus:02x}:{dev:02x}.{func:01x}/config"
    fd = os.open(cfg, mode)
    os.lseek(fd, offset, os.SEEK_SET)
    return fd
# struct format characters for the supported access widths (native byte order)
sizes = {8: "Q", 4: "I", 2: "H", 1: "B"}
def writepci(bus, device, func, offset, size, val):
    """Write val as a size-byte (1/2/4/8) native-endian value into the PCI
    config space of bus:device.func at offset."""
    fd = openpci(bus, device, func, offset, os.O_WRONLY)
    os.write(fd, struct.pack(sizes[size], val))
    os.close(fd)
def readpci(bus, device, func, offset, size):
    """Read a size-byte (1/2/4/8) native-endian value from the PCI config
    space of bus:device.func at offset."""
    fd = openpci(bus, device, func, offset, os.O_RDONLY)
    raw = os.read(fd, size)
    os.close(fd)
    return struct.unpack(sizes[size], raw)[0]
def changebit(bus, device, func, offset, bit, val):
    """Set (val truthy) or clear (val falsy) one bit of the 32-bit config
    register at offset, using a read-modify-write cycle."""
    fd = openpci(bus, device, func, offset, os.O_RDWR)
    word = struct.unpack("I", os.read(fd, 4))[0]
    mask = 1 << bit
    word = (word | mask) if val else (word & ~mask)
    # the read advanced the file position; seek back before writing
    os.lseek(fd, offset, os.SEEK_SET)
    os.write(fd, struct.pack('I', word))
    os.close(fd)
| 1,211 | Python | .py | 33 | 32.575758 | 87 | 0.625107 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,880 | tl_stat.py | andikleen_pmu-tools/tl_stat.py | # Copyright (c) 2012-2020, Intel Corporation
# Author: Andi Kleen
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Maintain error data on perf measurements
from __future__ import print_function
import math
from collections import namedtuple
from tl_io import warn, warn_test, inform
# Error estimate attached to a measured value: standard deviation of the
# samples and the perf event multiplexing ratio it was measured with.
ValStat = namedtuple('ValStat', ['stddev', 'multiplex'])
def geoadd(l):
    """Combine the values in l in quadrature (root of the sum of squares)."""
    return math.sqrt(sum(x ** 2 for x in l))
# Combine stddevs in quadrature and take the minimum of the multiplex
# ratios. XXX better way to combine multiplex ratios?
def combine_valstat(l):
    """Merge a list of ValStat into one ValStat ([] for empty input)."""
    if not l:
        return []
    stddevs = [v.stddev for v in l]
    ratios = [v.multiplex for v in l]
    return ValStat(geoadd(stddevs), min(ratios))
class ComputeStat:
    """Maintain statistics on measurement data."""

    def __init__(self, quiet):
        # indices of result columns that some formula actually consumed
        self.referenced = set()
        self.already_warned = set()
        self.errcount = 0
        self.errors = set()
        self.prev_errors = set()
        self.mismeasured = set()
        self.prev_mismeasured = set()
        self.quiet = quiet

    def referenced_check(self, res, evnum):
        """Warn (once per index) about result columns no formula referenced."""
        pending = self.referenced - self.already_warned
        if not pending:
            return
        self.already_warned |= pending
        # sanity check: did we reference all results?
        if len(res.keys()) == 0:
            return
        first = res[list(res.keys())[0]]
        if len(first) != len(evnum):
            warn("results len %d does not match event len %d" % (len(first), len(evnum)))
            return
        if len(pending) == len(first):
            return
        # "dummy" events are placeholders and are not expected to be referenced
        dummies = {i for i, d in enumerate(evnum) if d == "dummy"}
        unused = set(range(len(first))) - pending - dummies
        if unused:
            warn_test("%d results not referenced: " % (len(unused)) +
                      " ".join("%d" % x for x in sorted(unused)))

    def compute_errors(self):
        """Report accumulated zero-count and mismeasured nodes, then reset."""
        if self.errcount > 0 and self.errors != self.prev_errors:
            inform(("%d nodes had zero counts: " % (self.errcount)) +
                   " ".join(sorted(self.errors)))
        self.errcount = 0
        self.prev_errors = self.errors
        self.errors = set()
        # only re-report when new mismeasured nodes appeared (strict superset)
        if self.mismeasured and self.mismeasured > self.prev_mismeasured:
            inform("Mismeasured (out of bound values):" +
                   " ".join(sorted(self.mismeasured)))
        self.prev_mismeasured = self.mismeasured
| 2,835 | Python | .py | 66 | 34.924242 | 89 | 0.623823 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,881 | icl_client_ratios.py | andikleen_pmu-tools/icl_client_ratios.py | # -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel 10th gen Core (code name Icelake)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#
# Helpers
# Runtime hooks and topology parameters. NOTE(review): these module globals
# appear to be overwritten by the driver (toplev) after import — confirm; the
# values below are only stand-alone defaults.
print_error = lambda msg: False  # error sink; default discards the message
smt_enabled = False
ebs_mode = False
version = "4.8-full-perf"
base_frequency = -1.0
Memory = 0
Average_Frequency = 0.0
num_cores = 1
num_threads = 1
num_sockets = 1
topdown_use_fixed = False
def handle_error(obj, msg):
    """Flag a failed node computation: report msg via print_error, bump the
    node's error count and reset value/threshold so it prints as inactive."""
    print_error(msg)
    obj.errcount += 1
    obj.thresh = False
    obj.val = 0
def handle_error_metric(obj, msg):
    """Like handle_error but for metric objects, which have no threshold."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
# Constants
# Model-specific cost/width parameters baked in by the TMA generator for
# Icelake client. NOTE(review): values come from the TMA spreadsheet; the
# per-name semantics below are inferred from usage — confirm against TMA 4.8.
Exe_Ports = 10
Mem_L2_Store_Cost = 10
Mem_STLB_Hit_Cost = 7
BAClear_Cost = 10
MS_Switches_Cost = 3
Avg_Assist_Cost = 34
Pipeline_Width = 5
OneMillion = 1000000
OneBillion = 1000000000
Energy_Unit = 61
PERF_METRICS_MSR = 1
DS = 0
# Aux. formulas
def Backend_Bound_Cycles(self, EV, level):
    """Cycles attributed to the backend: total stalls plus the few-uops
    executed estimate plus store-bound cycles."""
    stalls = EV("CYCLE_ACTIVITY.STALLS_TOTAL", level)
    few_uops = Few_Uops_Executed_Threshold(self, EV, level)
    return stalls + few_uops + EV("EXE_ACTIVITY.BOUND_ON_STORES", level)
def Br_DoI_Jumps(self, EV, level):
    """Taken branches that are neither conditional-taken nor near calls
    (calls weighted twice)."""
    taken = EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    cond_taken = EV("BR_INST_RETIRED.COND_TAKEN", level)
    calls = EV("BR_INST_RETIRED.NEAR_CALL", level)
    return taken - cond_taken - 2 * calls
def Branching_Retired(self, EV, level):
    """Fraction of slots retiring branch-related work (branches, calls
    double-weighted, NOPs)."""
    branchy = (EV("BR_INST_RETIRED.ALL_BRANCHES", level) +
               2 * EV("BR_INST_RETIRED.NEAR_CALL", level) +
               EV("INST_RETIRED.NOP", level))
    return branchy / SLOTS(self, EV, level)
def Serialize_Core(self, EV, level):
    # Auto-generated TMA helper: Core_Bound weighted towards its serializing
    # sub-components (Serializing_Operation / Ports_Utilized_0 / Divider).
    return self.Core_Bound.compute(EV) * (self.Serializing_Operation.compute(EV) + self.Core_Bound.compute(EV) * EV("RS_EVENTS.EMPTY_CYCLES", level) / CLKS(self, EV, level) * self.Ports_Utilized_0.compute(EV)) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))
def Umisp(self, EV, level):
    """Weight of microcode-sequencer-related mispredicts relative to all
    branch mispredicts (used to split Mispredictions vs Irregular_Overhead)."""
    ms_share = self.Microcode_Sequencer.compute(EV)
    other = self.Other_Mispredicts.compute(EV)
    return 10 * ms_share * other / self.Branch_Mispredicts.compute(EV)
def Assist(self, EV, level):
    """Share of heavy operations that are microcode assists."""
    ms_of_heavy = self.Microcode_Sequencer.compute(EV) / (self.Microcode_Sequencer.compute(EV) + self.Few_Uops_Instructions.compute(EV))
    assists_of_ms = self.Assists.compute(EV) / self.Microcode_Sequencer.compute(EV)
    return ms_of_heavy * assists_of_ms
def Assist_Frontend(self, EV, level):
    # Auto-generated TMA helper: front-end latency share attributable to
    # microcode assists (Assist fraction scaled into Fetch_Latency sub-nodes).
    return Assist(self, EV, level) * self.Fetch_Latency.compute(EV) * (self.MS_Switches.compute(EV) + self.Branch_Resteers.compute(EV) * (self.Clears_Resteers.compute(EV) + self.Mispredicts_Resteers.compute(EV) * Umisp(self, EV, level)) / (self.Clears_Resteers.compute(EV) + self.Unknown_Branches.compute(EV) + self.Mispredicts_Resteers.compute(EV))) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
def Assist_Retired(self, EV, level):
    """Retiring share attributable to microcode assists."""
    assist_fraction = Assist(self, EV, level)
    return assist_fraction * self.Heavy_Operations.compute(EV)
def Core_Bound_Cycles(self, EV, level):
    """Cycles attributed to core (non-memory) backend limits."""
    zero_port_cycles = self.Ports_Utilized_0.compute(EV) * CLKS(self, EV, level)
    return zero_port_cycles + Few_Uops_Executed_Threshold(self, EV, level)
def DurationTimeInSeconds(self, EV, level):
    """Length of the measurement interval in seconds."""
    interval_ms = EV("interval-ms", 0)
    return interval_ms / 1000
def Execute_Cycles(self, EV, level):
    """Cycles where at least one uop executed (halved per-core when SMT on)."""
    if smt_enabled:
        return EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) / 2
    return EV("UOPS_EXECUTED.THREAD:c1", level)
def FB_Factor(self, EV, level):
    """Fixed-cost scaling factor for fill-buffer hits: assuming FB hits arrive
    uniformly within the original L1_Miss cost interval, each one carries
    half of the fixed cost on average."""
    return 1 + FBHit_per_L1Miss(self, EV, level) / 2
def FBHit_per_L1Miss(self, EV, level):
    """Ratio of fill-buffer hits to L1 data-cache misses."""
    fb_hits = EV("MEM_LOAD_RETIRED.FB_HIT", level)
    l1_misses = EV("MEM_LOAD_RETIRED.L1_MISS", level)
    return fb_hits / l1_misses
def Fetched_Uops(self, EV, level):
    """Total uops issued by the front-end."""
    return EV("UOPS_ISSUED.ANY", level)
def Few_Uops_Executed_Threshold(self, EV, level):
    """Cycles with low port utilization, with the 2-port cycles weighted by
    the Retiring fraction."""
    one_port = EV("EXE_ACTIVITY.1_PORTS_UTIL", level)
    two_ports = EV("EXE_ACTIVITY.2_PORTS_UTIL", level)
    return one_port + self.Retiring.compute(EV) * two_ports
def FLOP_Count(self, EV, level):
    """Weighted count of retired floating-point arithmetic operations."""
    return (EV("FP_ARITH_INST_RETIRED.SCALAR", level) +
            2 * EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) +
            4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) +
            8 * EV("FP_ARITH_INST_RETIRED.8_FLOPS", level) +
            16 * EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level))
def FP_Arith_Scalar(self, EV, level):
    """Count of retired scalar FP arithmetic instructions."""
    return EV("FP_ARITH_INST_RETIRED.SCALAR", level)
def FP_Arith_Vector(self, EV, level):
    """Count of retired vector FP arithmetic instructions."""
    return EV("FP_ARITH_INST_RETIRED.VECTOR", level)
def HighIPC(self, EV, level):
    """IPC normalized by the machine's pipeline width."""
    return IPC(self, EV, level) / Pipeline_Width
def L2_Bound_Ratio(self, EV, level):
    """Fraction of cycles stalled between L1D miss and L2 miss."""
    l1d_stalls = EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", level)
    l2_stalls = EV("CYCLE_ACTIVITY.STALLS_L2_MISS", level)
    return (l1d_stalls - l2_stalls) / CLKS(self, EV, level)
def Light_Ops_Sum(self, EV, level):
    """Sum of the accounted 'light' Retiring sub-nodes."""
    parts = (self.FP_Arith, self.Memory_Operations, self.Branch_Instructions)
    return sum(node.compute(EV) for node in parts)
def LOAD_L2_HIT(self, EV, level):
    """L2 hits, inflated by the fill-buffer-hit factor."""
    l2_hits = EV("MEM_LOAD_RETIRED.L2_HIT", level)
    return l2_hits * (1 + FBHit_per_L1Miss(self, EV, level))
def LOAD_L3_HIT(self, EV, level):
    """L3 hits, scaled by the fill-buffer fixed-cost factor."""
    l3_hits = EV("MEM_LOAD_RETIRED.L3_HIT", level)
    return l3_hits * FB_Factor(self, EV, level)
def LOAD_XSNP_HIT(self, EV, level):
    """L3-hit loads whose cross-core snoop hit a clean line."""
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT", level)
def LOAD_XSNP_HITM(self, EV, level):
    """L3-hit loads whose cross-core snoop hit a modified line."""
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM", level)
def LOAD_XSNP_MISS(self, EV, level):
    """L3-hit loads whose cross-core snoop missed."""
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", level)
def MEM_Bound_Ratio(self, EV, level):
    """DRAM-bound stall estimate relative to clocks."""
    l3_miss_part = EV("CYCLE_ACTIVITY.STALLS_L3_MISS", level) / CLKS(self, EV, level)
    return l3_miss_part + L2_Bound_Ratio(self, EV, level) - self.L2_Bound.compute(EV)
def Mem_Lock_St_Fraction(self, EV, level):
    """Fraction of stores that are locked accesses."""
    locked = EV("MEM_INST_RETIRED.LOCK_LOADS", level)
    stores = EV("MEM_INST_RETIRED.ALL_STORES", level)
    return locked / stores
def Memory_Bound_Fraction(self, EV, level):
    """Share of backend-bound cycles caused by memory (loads or stores)."""
    mem_stalls = EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", level) + EV("EXE_ACTIVITY.BOUND_ON_STORES", level)
    return mem_stalls / Backend_Bound_Cycles(self, EV, level)
def Mispred_Clears_Fraction(self, EV, level):
    """Share of pipeline flushes caused by branch mispredicts (vs machine clears)."""
    mispredicts = EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    clears = EV("MACHINE_CLEARS.COUNT", level)
    return mispredicts / (mispredicts + clears)
def ORO_Demand_RFO_C1(self, EV, level):
    """min(core clocks, cycles with outstanding demand RFOs); the min is
    pushed through EV as a callback so it is applied per sample."""
    clamp = lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level),
                                  EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level))
    return EV(clamp, level)
def ORO_DRD_Any_Cycles(self, EV, level):
    """min(core clocks, cycles with any outstanding demand data read)."""
    clamp = lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level),
                                  EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level))
    return EV(clamp, level)
def ORO_DRD_BW_Cycles(self, EV, level):
    """min(core clocks, cycles with >=4 outstanding demand data reads)."""
    clamp = lambda EV, level: min(EV("CPU_CLK_UNHALTED.THREAD", level),
                                  EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c4", level))
    return EV(clamp, level)
def Store_L2_Hit_Cycles(self, EV, level):
    """Estimated cycles on L2-hit RFOs, excluding locked stores."""
    rfo_hits = EV("L2_RQSTS.RFO_HIT", level)
    unlocked_share = 1 - Mem_Lock_St_Fraction(self, EV, level)
    return rfo_hits * Mem_L2_Store_Cost * unlocked_share
def Mem_XSNP_HitM_Cost(self, EV, level):
    """Cross-snoop HITM cost in cycles, scaled by core frequency."""
    base_cost = 32.5
    return base_cost * Core_Frequency(self, EV, level)
def Mem_XSNP_Hit_Cost(self, EV, level):
    """Cross-snoop clean-hit cost in cycles, scaled by core frequency."""
    base_cost = 27
    return base_cost * Core_Frequency(self, EV, level)
def Mem_XSNP_None_Cost(self, EV, level):
    """No-snoop L3 access cost in cycles, scaled by core frequency."""
    base_cost = 12.5
    return base_cost * Core_Frequency(self, EV, level)
def Mem_L2_Hit_Cost(self, EV, level):
    """L2 hit cost in cycles, scaled by core frequency."""
    base_cost = 3.5
    return base_cost * Core_Frequency(self, EV, level)
def PERF_METRICS_SUM(self, EV, level):
    """Sum of the four level-1 PERF_METRICS fractions (sanity ~1.0)."""
    names = ("PERF_METRICS.FRONTEND_BOUND", "PERF_METRICS.BAD_SPECULATION",
             "PERF_METRICS.RETIRING", "PERF_METRICS.BACKEND_BOUND")
    return sum(EV(n, level) / EV("TOPDOWN.SLOTS", level) for n in names)
def Retire_Fraction(self, EV, level):
    """Fraction of issued uops that eventually retired."""
    retired = EV("UOPS_RETIRED.SLOTS", level)
    issued = EV("UOPS_ISSUED.ANY", level)
    return retired / issued
def Retired_Slots(self, EV, level):
    """Retired slots per logical processor (Retiring fraction times SLOTS)."""
    retiring_fraction = self.Retiring.compute(EV)
    return retiring_fraction * SLOTS(self, EV, level)
def Num_CPUs(self, EV, level):
    """Logical processor count on this model: 4 cores, doubled with SMT."""
    if smt_enabled:
        return 8
    return 4
def Dependent_Loads_Weight(self, EV, level):
    """Workload parameter for dependent (pointer-chasing) loads, as an integer
    percentage 0..100; fixed default for this model."""
    weight = 20
    return weight
# Total pipeline cost of Branch Misprediction related bottlenecks
def Mispredictions(self, EV, level):
    # Percentage of slots; (1 - Umisp) strips the microcode-sequencer share.
    val = 100 *(1 - Umisp(self, EV, level)) * (self.Branch_Mispredicts.compute(EV) + self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)
def Big_Code(self, EV, level):
    # Percentage of slots; flags i-side footprint issues when above 20.
    val = 100 * self.Fetch_Latency.compute(EV) * (self.ITLB_Misses.compute(EV) + self.ICache_Misses.compute(EV) + self.Unknown_Branches.compute(EV)) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)
def Instruction_Fetch_BW(self, EV, level):
    # Percentage of slots: frontend bound minus mispredict-resteer and assist
    # shares, minus the Big_Code (footprint) component.
    val = 100 *(self.Frontend_Bound.compute(EV) - (1 - Umisp(self, EV, level)) * self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) - Assist_Frontend(self, EV, level)) - Big_Code(self, EV, level)
    self.thresh = (val > 20)
    return val
# Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks
def Cache_Memory_Bandwidth(self, EV, level):
    # Percentage of slots; auto-generated weighted sum of the bandwidth-ish
    # leaves (MEM_Bandwidth, SQ_Full, FB_Full) inside Memory_Bound.
    val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Bandwidth.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.SQ_Full.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.FB_Full.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of external Memory- or Cache-Latency related bottlenecks
def Cache_Memory_Latency(self, EV, level):
    # Percentage of slots; auto-generated weighted sum of the latency-ish
    # leaves (MEM_Latency, L3_Hit_Latency, L2_Bound, Store/L1_Hit_Latency).
    val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Latency.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.L3_Hit_Latency.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * self.L2_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Store_Latency.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.L1_Hit_Latency.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)
def Memory_Data_TLBs(self, EV, level):
    # Percentage of slots; DTLB_Load and DTLB_Store shares inside Memory_Bound.
    val = 100 *(self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / max(self.Memory_Bound.compute(EV) , (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV)))) * (self.DTLB_Load.compute(EV) / max(self.L1_Bound.compute(EV) , (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.G4K_Aliasing.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.DTLB_Store.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)
def Memory_Synchronization(self, EV, level):
    # Percentage of slots; contested/shared-line and false-sharing shares plus
    # a Machine_Clears term.
    val = 100 *(self.Memory_Bound.compute(EV) * ((self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Contested_Accesses.compute(EV) + self.Data_Sharing.compute(EV)) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)) + (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * self.False_Sharing.compute(EV) / ((self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)) - self.Store_Latency.compute(EV))) + self.Machine_Clears.compute(EV) * (1 - self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))))
    self.thresh = (val > 10)
    return val
# Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy.
def Compute_Bound_Est(self, EV, level):
    # Percentage of slots; Divider plus high-port-utilization shares of Core_Bound.
    val = 100 *((self.Core_Bound.compute(EV) * self.Divider.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) + (self.Core_Bound.compute(EV) * (self.Ports_Utilization.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) * (self.Ports_Utilized_3m.compute(EV) / (self.Ports_Utilized_0.compute(EV) + self.Ports_Utilized_1.compute(EV) + self.Ports_Utilized_2.compute(EV) + self.Ports_Utilized_3m.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments)
def Irregular_Overhead(self, EV, level):
    # Percentage of slots; assists, microcode mispredicts, nukes, serializing core.
    val = 100 *(Assist_Frontend(self, EV, level) + Umisp(self, EV, level) * self.Branch_Mispredicts.compute(EV) + (self.Machine_Clears.compute(EV) * self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))) + Serialize_Core(self, EV, level) + Assist_Retired(self, EV, level))
    self.thresh = (val > 10)
    return val
# Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.
def Other_Bottlenecks(self, EV, level):
    # Residual: 100% minus every other named Bottleneck metric.
    val = 100 -(Big_Code(self, EV, level) + Instruction_Fetch_BW(self, EV, level) + Mispredictions(self, EV, level) + Cache_Memory_Bandwidth(self, EV, level) + Cache_Memory_Latency(self, EV, level) + Memory_Data_TLBs(self, EV, level) + Memory_Synchronization(self, EV, level) + Compute_Bound_Est(self, EV, level) + Irregular_Overhead(self, EV, level) + Branching_Overhead(self, EV, level) + Useful_Work(self, EV, level))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of instructions used for program control-flow - a subset
# of the Retiring category in TMA (a lower bound). Consider loop unrolling or
# function inlining optimizations.
def Branching_Overhead(self, EV, level):
    val = 100 * Branching_Retired(self, EV, level)
    self.thresh = val > 5
    return val
# Total pipeline cost of "useful operations" - the portion of Retiring not
# covered by Branching_Overhead nor Irregular_Overhead.
def Useful_Work(self, EV, level):
    retiring = self.Retiring.compute(EV)
    overhead = Branching_Retired(self, EV, level) + Assist_Retired(self, EV, level)
    val = 100 * (retiring - overhead)
    self.thresh = val > 20
    return val
# Probability of Core Bound bottleneck hidden by SMT-profiling artifacts. Tip: consider analysis with SMT disabled
def Core_Bound_Likely(self, EV, level):
    # Nested conditional expressions generated by the TMA spreadsheet:
    # outer gate is SMT 2T utilization > 0.5, else 0.
    val = 100 *(1 - self.Core_Bound.compute(EV) / self.Ports_Utilization.compute(EV) if self.Core_Bound.compute(EV)< self.Ports_Utilization.compute(EV) else 1) if SMT_2T_Utilization(self, EV, level)> 0.5 else 0
    self.thresh = (val > 0.5)
    return val
def IPC(self, EV, level):
    """Instructions per cycle for this logical processor."""
    insts = EV("INST_RETIRED.ANY", level)
    return insts / CLKS(self, EV, level)
def UopPI(self, EV, level):
    """Uops per retired instruction; flagged above 1.05."""
    val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
    self.thresh = val > 1.05
    return val
def UpTB(self, EV, level):
    """Uops per taken branch; flagged when below 1.5x the pipeline width."""
    taken = EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    val = Retired_Slots(self, EV, level) / taken
    self.thresh = val < Pipeline_Width * 1.5
    return val
def CPI(self, EV, level):
    """Cycles per instruction (reciprocal of IPC)."""
    return 1 / IPC(self, EV, level)
def CLKS(self, EV, level):
    """Actual clocks while this logical processor is active (unhalted)."""
    return EV("CPU_CLK_UNHALTED.THREAD", level)
# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
    """Issue-pipeline slots for this logical processor.

    The generator emitted `EV(...) if topdown_use_fixed else EV(...)` with
    byte-identical arms, so the dead conditional is dropped; the event is
    read exactly once either way, preserving behavior.
    """
    return EV("TOPDOWN.SLOTS", level)
def Slots_Utilization(self, EV, level):
    """Fraction of the physical core's issue slots used by this thread
    (1 when SMT is off)."""
    if not smt_enabled:
        return 1
    return SLOTS(self, EV, level) / (EV("TOPDOWN.SLOTS:percore", level) / 2)
def Execute_per_Issue(self, EV, level):
    """Executed/issued uop ratio: >1 suggests micro-fusion, <1 suggests
    'execute at rename' elimination."""
    executed = EV("UOPS_EXECUTED.THREAD", level)
    issued = EV("UOPS_ISSUED.ANY", level)
    return executed / issued
def CoreIPC(self, EV, level):
    """Instructions per cycle across both hyper-threads (per physical core)."""
    insts = EV("INST_RETIRED.ANY", level)
    return insts / CORE_CLKS(self, EV, level)
def FLOPc(self, EV, level):
    """Floating point operations per core cycle."""
    flops = FLOP_Count(self, EV, level)
    return flops / CORE_CLKS(self, EV, level)
def FP_Arith_Utilization(self, EV, level):
    """Per-core utilization of non-x87 FP execution units; >1 possible due
    to FMA double counting."""
    scalar = FP_Arith_Scalar(self, EV, level)
    vector = FP_Arith_Vector(self, EV, level)
    return (scalar + vector) / (2 * CORE_CLKS(self, EV, level))
def ILP(self, EV, level):
    """Average uops executed per cycle when any uop is executing."""
    executed = EV("UOPS_EXECUTED.THREAD", level)
    busy_cycles = EV("UOPS_EXECUTED.THREAD:c1", level)
    return executed / busy_cycles
def EPC(self, EV, level):
    """Uops executed per clock cycle."""
    executed = EV("UOPS_EXECUTED.THREAD", level)
    return executed / CLKS(self, EV, level)
def CORE_CLKS(self, EV, level):
    """Core clocks while any logical processor on the core is active."""
    if smt_enabled:
        return EV("CPU_CLK_UNHALTED.DISTRIBUTED", level)
    return CLKS(self, EV, level)
def IpLoad(self, EV, level):
    """Instructions per retired load; flagged below 3 (load-heavy code)."""
    insts = EV("INST_RETIRED.ANY", level)
    loads = EV("MEM_INST_RETIRED.ALL_LOADS", level)
    val = insts / loads
    self.thresh = val < 3
    return val
def IpStore(self, EV, level):
    """Instructions per retired store; flagged below 8 (store-heavy code)."""
    insts = EV("INST_RETIRED.ANY", level)
    stores = EV("MEM_INST_RETIRED.ALL_STORES", level)
    val = insts / stores
    self.thresh = val < 8
    return val
def IpBranch(self, EV, level):
    """Instructions per retired branch; flagged below 8."""
    insts = EV("INST_RETIRED.ANY", level)
    branches = EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    val = insts / branches
    self.thresh = val < 8
    return val
def IpCall(self, EV, level):
    """Instructions per near call; flagged below 200."""
    insts = EV("INST_RETIRED.ANY", level)
    calls = EV("BR_INST_RETIRED.NEAR_CALL", level)
    val = insts / calls
    self.thresh = val < 200
    return val
def IpTB(self, EV, level):
    """Instructions per taken branch; flagged below 2x pipeline width + 1."""
    insts = EV("INST_RETIRED.ANY", level)
    taken = EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    val = insts / taken
    self.thresh = val < Pipeline_Width * 2 + 1
    return val
def BpTkBranch(self, EV, level):
    """Branch instructions per taken branch; approximates PGO-likelihood for
    non-loopy codes."""
    branches = EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    return branches / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
def IpFLOP(self, EV, level):
    """Instructions per FP operation; flagged below 10."""
    insts = EV("INST_RETIRED.ANY", level)
    val = insts / FLOP_Count(self, EV, level)
    self.thresh = val < 10
    return val
def IpArith(self, EV, level):
    """Instructions per FP arithmetic instruction; flagged below 10. Values
    below 1 are possible due to intentional FMA double counting."""
    insts = EV("INST_RETIRED.ANY", level)
    val = insts / (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level))
    self.thresh = val < 10
    return val
def IpArith_Scalar_SP(self, EV, level):
    """Instructions per scalar single-precision FP instruction; flagged below 10."""
    insts = EV("INST_RETIRED.ANY", level)
    val = insts / EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level)
    self.thresh = val < 10
    return val
def IpArith_Scalar_DP(self, EV, level):
    """Instructions per scalar double-precision FP instruction; flagged below 10."""
    insts = EV("INST_RETIRED.ANY", level)
    val = insts / EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level)
    self.thresh = val < 10
    return val
def IpArith_AVX128(self, EV, level):
    """Instructions per 128-bit packed FP instruction (SP+DP); flagged below 10."""
    insts = EV("INST_RETIRED.ANY", level)
    packed = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) +
              EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level))
    val = insts / packed
    self.thresh = val < 10
    return val
def IpArith_AVX256(self, EV, level):
    """Instructions per 256-bit packed FP instruction (SP+DP); flagged below 10."""
    insts = EV("INST_RETIRED.ANY", level)
    packed = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) +
              EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level))
    val = insts / packed
    self.thresh = val < 10
    return val
def IpArith_AVX512(self, EV, level):
    """Instructions per 512-bit packed FP instruction (SP+DP); flagged below 10."""
    insts = EV("INST_RETIRED.ANY", level)
    packed = (EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", level) +
              EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level))
    val = insts / packed
    self.thresh = val < 10
    return val
def IpPause(self, EV, level):
    """Instructions per retired PAUSE instruction."""
    pauses = EV("MISC_RETIRED.PAUSE_INST", level)
    return Instructions(self, EV, level) / pauses
def IpSWPF(self, EV, level):
    """Instructions per software prefetch of any type; flagged below 100."""
    insts = EV("INST_RETIRED.ANY", level)
    val = insts / EV("SW_PREFETCH_ACCESS.T0:u0xF", level)
    self.thresh = val < 100
    return val
def Instructions(self, EV, level):
    """Total retired instruction count."""
    return EV("INST_RETIRED.ANY", level)
def Retire(self, EV, level):
    """Average uops retired per cycle in which at least one uop retired."""
    busy_retire_cycles = EV("UOPS_RETIRED.SLOTS:c1", level)
    return Retired_Slots(self, EV, level) / busy_retire_cycles
def IpAssist(self, EV, level):
    """Instructions per microcode assist; flagged below 100000."""
    insts = EV("INST_RETIRED.ANY", level)
    val = insts / EV("ASSISTS.ANY", level)
    self.thresh = val < 100000
    return val
def Execute(self, EV, level):
    """Uops executed per cycle with any execution activity."""
    executed = EV("UOPS_EXECUTED.THREAD", level)
    return executed / Execute_Cycles(self, EV, level)
def Fetch_LSD(self, EV, level):
    """Average uops delivered per active LSD cycle."""
    uops = EV("LSD.UOPS", level)
    return uops / EV("LSD.CYCLES_ACTIVE", level)
def Fetch_DSB(self, EV, level):
    """Average uops delivered per active DSB cycle."""
    uops = EV("IDQ.DSB_UOPS", level)
    return uops / EV("IDQ.DSB_CYCLES_ANY", level)
def Fetch_MITE(self, EV, level):
    """Average uops delivered per active MITE (legacy decode) cycle."""
    uops = EV("IDQ.MITE_UOPS", level)
    return uops / EV("IDQ.MITE_CYCLES_ANY", level)
def Fetch_UpC(self, EV, level):
    """Average uops issued per cycle in which anything was issued."""
    issued = EV("UOPS_ISSUED.ANY", level)
    return issued / EV("UOPS_ISSUED.ANY:c1", level)
def LSD_Coverage(self, EV, level):
    """Fraction of all fetched uops delivered by the loop stream detector."""
    lsd_uops = EV("LSD.UOPS", level)
    return lsd_uops / Fetched_Uops(self, EV, level)
# Fraction of uops delivered by the DSB (decoded icache / uop cache). See
# section 'Decoded ICache' in the Optimization Manual.
# http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    # note: `and` keeps short-circuit semantics of the generated code
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val
def DSB_Switch_Cost(self, EV, level):
    """Average penalty cycles per DSB-to-MITE switch."""
    penalty = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", level)
    return penalty / EV("DSB2MITE_SWITCHES.PENALTY_CYCLES:c1:e1", level)
# Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Misses(self, EV, level):
    # Percentage of slots: DSB-switch latency share plus MITE bandwidth share.
    val = 100 *(self.Fetch_Latency.compute(EV) * self.DSB_Switches.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) + self.Fetch_Bandwidth.compute(EV) * self.MITE.compute(EV) / (self.LSD.compute(EV) + self.MITE.compute(EV) + self.DSB.compute(EV)))
    self.thresh = (val > 10)
    return val
# Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Bandwidth(self, EV, level):
val = 100 *(self.Frontend_Bound.compute(EV) * (self.Fetch_Bandwidth.compute(EV) / (self.Fetch_Bandwidth.compute(EV) + self.Fetch_Latency.compute(EV))) * (self.DSB.compute(EV) / (self.LSD.compute(EV) + self.MITE.compute(EV) + self.DSB.compute(EV))))
self.thresh = (val > 10)
return val
# Average Latency for L1 instruction cache misses
def ICache_Miss_Latency(self, EV, level):
return EV("ICACHE_16B.IFDATA_STALL", level) / EV("ICACHE_16B.IFDATA_STALL:c1:e1", level)
# Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.
def IC_Misses(self, EV, level):
val = 100 *(self.Fetch_Latency.compute(EV) * self.ICache_Misses.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
self.thresh = (val > 5)
return val
# Instructions per non-speculative DSB miss (lower number means higher occurrence rate)
def IpDSB_Miss_Ret(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("FRONTEND_RETIRED.ANY_DSB_MISS", level)
self.thresh = (val < 50)
return val
# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)
# L2 cache true code cacheline misses per kilo instruction
def L2MPKI_Code(self, EV, level):
return 1000 * EV("FRONTEND_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache speculative code cacheline misses per kilo instruction
def L2MPKI_Code_All(self, EV, level):
return 1000 * EV("L2_RQSTS.CODE_RD_MISS", level) / EV("INST_RETIRED.ANY", level)
# Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    """Instructions retired per retired branch misprediction (JEClear).

    Sets self.thresh when mispredictions are frequent, i.e. fewer than
    200 instructions retire per misprediction (lower value = worse).
    """
    retired = EV("INST_RETIRED.ANY", level)
    mispredicted = EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    per_misp = retired / mispredicted
    self.thresh = per_misp < 200
    return per_misp
# Info metrics: per-branch-class misprediction rates, then memory-subsystem
# ratios (latencies, MPKI = misses per kilo instruction, bandwidths).
# Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).
def IpMisp_Cond_Ntaken(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_NTAKEN", level)
    self.thresh = (val < 200)
    return val
# Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).
def IpMisp_Cond_Taken(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_TAKEN", level)
    self.thresh = (val < 200)
    return val
# Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).
def IpMisp_Ret(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.RET", level)
    self.thresh = (val < 500)
    return val
# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.INDIRECT", level)
    self.thresh = (val < 1000)
    return val
# Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)
def Branch_Misprediction_Cost(self, EV, level):
    return Mispredictions(self, EV, level) * SLOTS(self, EV, level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / 100
# Speculative to Retired ratio of all clears (covering Mispredicts and nukes)
def Spec_Clears_Ratio(self, EV, level):
    return EV("INT_MISC.CLEARS_COUNT", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))
# Fraction of branches that are non-taken conditionals
def Cond_NT(self, EV, level):
    return EV("BR_INST_RETIRED.COND_NTAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches that are taken conditionals
def Cond_TK(self, EV, level):
    return EV("BR_INST_RETIRED.COND_TAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches that are CALL or RET
def CallRet(self, EV, level):
    return (EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("BR_INST_RETIRED.NEAR_RETURN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches that are unconditional (direct or indirect) jumps
def Jump(self, EV, level):
    return Br_DoI_Jumps(self, EV, level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)
def Other_Branches(self, EV, level):
    return 1 -(Cond_NT(self, EV, level) + Cond_TK(self, EV, level) + CallRet(self, EV, level) + Jump(self, EV, level))
# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / (EV("MEM_LOAD_RETIRED.L1_MISS", level) + EV("MEM_LOAD_RETIRED.FB_HIT", level))
# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)
# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)
# L1 cache true misses per kilo instruction for all demand loads (including speculative)
def L1MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.ALL_DEMAND_DATA_RD", level) / EV("INST_RETIRED.ANY", level)
# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache misses per kilo instruction for all request types (including speculative)
def L2MPKI_All(self, EV, level):
    return 1000 *((EV("OFFCORE_REQUESTS.ALL_DATA_RD", level) - EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)) + EV("L2_RQSTS.ALL_DEMAND_MISS", level) + EV("L2_RQSTS.SWPF_MISS", level)) / Instructions(self, EV, level)
# L2 cache misses per kilo instruction for all demand loads (including speculative)
def L2MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level) / EV("INST_RETIRED.ANY", level)
# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("L2_RQSTS.RFO_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache hits per kilo instruction for all demand loads (including speculative)
def L2HPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level) / EV("INST_RETIRED.ANY", level)
# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)
# Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)
def FB_HPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("INST_RETIRED.ANY", level)
# Cache fill/access bandwidths in GB/sec: 64-byte lines, scaled by one
# billion and divided by the measured interval (Time helper below).
def L1D_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)
def L2_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)
def L3_Cache_Fill_BW(self, EV, level):
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)
def L3_Cache_Access_BW(self, EV, level):
    return 64 * EV("OFFCORE_REQUESTS.ALL_REQUESTS", level) / OneBillion / Time(self, EV, level)
# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    val = (EV("ITLB_MISSES.WALK_PENDING", level) + EV("DTLB_LOAD_MISSES.WALK_PENDING", level) + EV("DTLB_STORE_MISSES.WALK_PENDING", level)) / (2 * CORE_CLKS(self, EV, level))
    self.thresh = (val > 0.5)
    return val
# STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Code_STLB_MPKI(self, EV, level):
    return 1000 * EV("ITLB_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)
# STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Load_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)
# STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Store_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)
# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    return L1D_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    return L2_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    return L3_Cache_Fill_BW(self, EV, level)
# Average per-core data access bandwidth to the L3 cache [GB / sec]
def L3_Cache_Access_BW_2T(self, EV, level):
    return L3_Cache_Access_BW(self, EV, level)
# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)
# Average Latency for L3 cache miss demand Loads
def Load_L3_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD:u0x10", level) / EV("OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", level)
# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD:c1", level)
# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)
# Un-cacheable retired load per kilo instruction
def UC_Load_PKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_MISC_RETIRED.UC", level) / EV("INST_RETIRED.ANY", level)
# "Bus lock" per kilo instruction
def Bus_Lock_PKI(self, EV, level):
    return 1000 * EV("SQ_MISC.BUS_LOCK", level) / EV("INST_RETIRED.ANY", level)
# Info metrics: system-level utilization, frequency, power and time.
# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)
# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    # "msr/tsc/" reads the time-stamp counter via the perf msr PMU;
    # level 0 = whole-run value.
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)
# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)
# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)
# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)
# Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.
def Power_License0_Utilization(self, EV, level):
    return EV("CORE_POWER.LVL0_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level)
# Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.
def Power_License1_Utilization(self, EV, level):
    val = EV("CORE_POWER.LVL1_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level)
    self.thresh = (val > 0.5)
    return val
# Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions.
def Power_License2_Utilization(self, EV, level):
    val = EV("CORE_POWER.LVL2_TURBO_LICENSE", level) / CORE_CLKS(self, EV, level)
    self.thresh = (val > 0.5)
    return val
# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    # smt_enabled is a module/global flag set elsewhere; without SMT the
    # metric is defined as 0.
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_DISTRIBUTED", level) if smt_enabled else 0
# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val
# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)
# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    return 64 *(EV("UNC_ARB_TRK_REQUESTS.ALL", level) + EV("UNC_ARB_COH_TRK_REQUESTS.ALL", level)) / OneMillion / Time(self, EV, level) / 1000
# Total package Power in Watts
def Power(self, EV, level):
    return EV("UNC_PKG_ENERGY_STATUS", level) * Energy_Unit /(Time(self, EV, level) * OneMillion )
# Run duration time in seconds
def Time(self, EV, level):
    val = EV("interval-s", 0)
    # flag very short runs, where ratios are statistically unreliable
    self.thresh = (val < 1)
    return val
# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    return EV("UNC_CLOCK.SOCKET", level)
# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = (val < 1000000)
    return val
# Event groups
# ---- TMA tree node classes (auto-generated) --------------------------------
# Each class is one node of the Top-down tree. Shared attribute scheme:
#   name/domain/area/level - node identity and position in the tree
#   sample      - events suggested for perf sampling when this node is hot
#   metricgroup - metric-group tags used for grouping in the output
#   maxval      - clamp for the computed value (None = unclamped)
# NOTE(review): `sibling`, `self.parent` and cross-node references such as
# `self.Fetch_Latency` are presumably wired up by the framework after
# instantiation - confirm against the Setup code (not visible in this chunk).
class Frontend_Bound:
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_4:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    def compute(self, EV):
        try:
            # uses the fixed-counter PERF_METRICS MSR path when available,
            # else the classic event-based formula
            self.val = (EV("PERF_METRICS.FRONTEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) - EV("INT_MISC.UOP_DROPPING", 1) / SLOTS(self, EV, 1) if topdown_use_fixed else(EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) - EV("INT_MISC.UOP_DROPPING", 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the
processor's Frontend undersupplies its Backend. Frontend
denotes the first part of the processor core responsible to
fetch operations that are executed later on by the Backend
part. Within the Frontend; a branch predictor predicts the
next address to fetch; cache-lines are fetched from the
memory subsystem; parsed into instructions; and lastly
decoded into micro-operations (uops). Ideally the Frontend
can issue Pipeline_Width uops every cycle to the Backend.
Frontend Bound denotes unutilized issue-slots when there is
no Backend stall; i.e. bubbles where Frontend delivered no
uops while Backend could have accepted them. For example;
stalls due to instruction-cache misses would be categorized
under Frontend Bound."""
class Fetch_Latency:
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_16:pp', 'FRONTEND_RETIRED.LATENCY_GE_8:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (Pipeline_Width * EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", 2) - EV("INT_MISC.UOP_DROPPING", 2)) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend latency issues. For example; instruction-
cache misses; iTLB misses or fetch stalls after a branch
misprediction are categorized under Frontend Latency. In
such cases; the Frontend eventually delivers no uops for
some period."""
class ICache_Misses:
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.L2_MISS:pp', 'FRONTEND_RETIRED.L1I_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ICACHE_DATA.STALLS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to instruction cache misses.. Using compiler's
Profile-Guided Optimization (PGO) can reduce i-cache misses
through improved hot code layout."""
class ITLB_Misses:
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.STLB_MISS:pp', 'FRONTEND_RETIRED.ITLB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ICACHE_TAG.STALLS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Instruction TLB (ITLB) misses.. Consider
large 2M pages for code (selectively prefer hot large-size
function, due to limited 2M entries). Linux options:
standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public
ations/optimizing-function-placement-for-large-scale-data-
center-applications-2/"""
# Fetch-latency breakdown nodes (level 3/4 under Fetch_Latency).
class Branch_Resteers:
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            # resteer cycles plus the estimated Unknown_Branches child cost
            self.val = EV("INT_MISC.CLEAR_RESTEER_CYCLES", 3) / CLKS(self, EV, 3) + self.Unknown_Branches.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers. Branch Resteers estimates
the Frontend delay in fetching operations from corrected
path; following all sorts of miss-predicted branches. For
example; branchy code with lots of miss-predictions might
get categorized under Branch Resteers. Note the value of
this node may overlap with its siblings."""
class Mispredicts_Resteers:
    name = "Mispredicts_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 4) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Mispredicts_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Branch
Misprediction at execution stage."""
class Clears_Resteers:
    name = "Clears_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'MachineClears'])
    maxval = None
    def compute(self, EV):
        try:
            # complement of Mispredicts_Resteers: the non-misprediction share
            self.val = (1 - Mispred_Clears_Fraction(self, EV, 4)) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Clears_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Machine
Clears."""
class Unknown_Branches:
    name = "Unknown_Branches"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['BACLEARS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = BAClear_Cost * EV("BACLEARS.ANY", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Unknown_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to new branch address clears. These are fetched
branches the Branch Prediction Unit was unable to recognize
(e.g. first time the branch is fetched or hitting BTB
capacity limit) hence called Unknown Branches"""
class MS_Switches:
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU
was stalled due to switches of uop delivery to the Microcode
Sequencer (MS). Commonly used instructions are optimized for
delivery by the DSB (decoded i-cache) or MITE (legacy
instruction decode) pipelines. Certain operations cannot be
handled natively by the execution pipeline; and must be
performed by microcode (small programs injected into the
execution stream). Switching to the MS too often can
negatively impact performance. The MS is designated to
deliver long uop flows required by CISC instructions like
CPUID; or uncommon conditions like Floating Point Assists
when dealing with Denormals."""
# Remaining fetch-latency node (LCP, DSB_Switches) and the start of the
# fetch-bandwidth subtree (Fetch_Bandwidth, MITE).
class LCP:
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DECODE.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled
due to Length Changing Prefixes (LCPs). Using proper
compiler flags or Intel Compiler by default will certainly
avoid this."""
class DSB_Switches:
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.DSB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to switches from DSB to MITE pipelines. The DSB
(decoded i-cache) is a Uop Cache where the front-end
directly delivers Uops (micro operations) avoiding heavy x86
decoding. The DSB pipeline has shorter latency and delivered
higher bandwidth than the MITE (legacy instruction decode
pipeline). Switching between the two pipelines can cause
penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class Fetch_Bandwidth:
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_2:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # bandwidth is the frontend remainder after latency; clamp at 0
            self.val = max(0 , self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV))
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend bandwidth issues. For example;
inefficiencies at the instruction decoders; or restrictions
for caching in the DSB (decoded uops cache) are categorized
under Fetch Bandwidth. In such cases; the Frontend typically
delivers suboptimal amount of uops to the Backend."""
class MITE:
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.ANY_DSB_MISS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            # per-core estimate: /2 accounts for the two SMT threads
            self.val = (EV("IDQ.MITE_CYCLES_ANY", 3) - EV("IDQ.MITE_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to the MITE pipeline (the legacy
decode pipeline). This pipeline is used for code that was
not pre-cached in the DSB or LSD. For example;
inefficiencies due to asymmetric decoders; use of long
immediate or LCP can manifest as MITE fetch bandwidth
bottleneck.. Consider tuning codegen of 'small hotspots'
that can fit in DSB. Read about 'Decoded ICache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
# Fetch-bandwidth breakdown nodes: decoder details and the DSB/LSD sources.
class Decoder0_Alone:
    name = "Decoder0_Alone"
    domain = "Slots_Estimated"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            # :c1/:c2 are cmask qualifiers: >=1 decoder minus >=2 decoders
            # active leaves cycles where only decoder-0 decoded
            self.val = (EV("INST_DECODED.DECODERS:c1", 4) - EV("INST_DECODED.DECODERS:c2", 4)) / CORE_CLKS(self, EV, 4) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Decoder0_Alone zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where decoder-0
was the only active decoder"""
class MITE_4wide:
    name = "MITE_4wide"
    domain = "Core_Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.MITE_UOPS:c4", 4) - EV("IDQ.MITE_UOPS:c5", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE_4wide zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where (only) 4
uops were delivered by the MITE pipeline"""
class DSB:
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.DSB_CYCLES_ANY", 3) - EV("IDQ.DSB_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to DSB (decoded uop cache) fetch
pipeline. For example; inefficient utilization of the DSB
cache structure or bank conflict when reading from it; are
categorized here."""
class LSD:
    name = "LSD"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'LSD'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("LSD.CYCLES_ACTIVE", 3) - EV("LSD.CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LSD zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to LSD (Loop Stream Detector) unit.
LSD typically does well sustaining Uop supply. However; in
some rare cases; optimal uop-delivery could not be reached
for small loops whose size (in terms of number of uops) does
not suit well the LSD structure."""
class Bad_Speculation:
name = "Bad_Speculation"
domain = "Slots"
area = "BAD"
level = 1
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['TmaL1'])
maxval = None
def compute(self, EV):
try:
self.val = max(1 -(self.Frontend_Bound.compute(EV) + self.Backend_Bound.compute(EV) + self.Retiring.compute(EV)) , 0 )
self.thresh = (self.val > 0.15)
except ZeroDivisionError:
handle_error(self, "Bad_Speculation zero division")
return self.val
desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""
class Branch_Mispredicts:
name = "Branch_Mispredicts"
domain = "Slots"
area = "BAD"
level = 2
htoff = False
sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
errcount = 0
sibling = None
metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
maxval = None
def compute(self, EV):
try:
self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
self.thresh = (self.val > 0.1) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Branch_Mispredicts zero division")
return self.val
desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction. These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Other_Mispredicts:
name = "Other_Mispredicts"
domain = "Slots"
area = "BAD"
level = 3
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['BvIO', 'BrMispredicts'])
maxval = None
def compute(self, EV):
try:
self.val = max(self.Branch_Mispredicts.compute(EV) * (1 - EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) / (EV("INT_MISC.CLEARS_COUNT", 3) - EV("MACHINE_CLEARS.COUNT", 3))) , 0.0001 )
self.thresh = (self.val > 0.05) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Other_Mispredicts zero division")
return self.val
desc = """
This metric estimates fraction of slots the CPU was stalled
due to other cases of misprediction (non-retired x86
branches or other types)."""
class Machine_Clears:
    """TMA level-2 node (Slots domain, BAD area): slots wasted due to machine clears."""
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Machine clears = whatever part of Bad_Speculation is not branch
        # mispredicts; clamped at 0 so rounding can't produce a negative slot count.
        try:
            self.val = max(0 , self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears. These slots are either wasted by
uops fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:. https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""
class Other_Nukes:
    """TMA level-3 node (Slots domain, BAD area): machine clears not caused by memory ordering."""
    name = "Other_Nukes"
    domain = "Slots"
    area = "BAD"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'Machine_Clears'])
    maxval = None
    def compute(self, EV):
        # Parent Machine_Clears value scaled by the fraction of clears that are
        # NOT memory-ordering clears; floored at 0.0001.
        try:
            self.val = max(self.Machine_Clears.compute(EV) * (1 - EV("MACHINE_CLEARS.MEMORY_ORDERING", 3) / EV("MACHINE_CLEARS.COUNT", 3)) , 0.0001 )
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Nukes zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Nukes (Machine Clears) not related to memory
ordering."""
class Backend_Bound:
    """TMA level-1 category (Slots domain, BE area): slots stalled on backend resources."""
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = ['TOPDOWN.BACKEND_BOUND_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        # Two formula variants selected by the global topdown_use_fixed flag:
        # the fixed PERF_METRICS MSR path vs. the plain TOPDOWN.* slot events.
        # Both add a Pipeline_Width * INT_MISC.CLEARS_COUNT correction term.
        # Level-1 node: its threshold does not gate on a parent.
        try:
            self.val = (EV("PERF_METRICS.BACKEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) + (Pipeline_Width * EV("INT_MISC.CLEARS_COUNT", 1)) / SLOTS(self, EV, 1) if topdown_use_fixed else(EV("TOPDOWN.BACKEND_BOUND_SLOTS", 1) + Pipeline_Width * EV("INT_MISC.CLEARS_COUNT", 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. Backend is the portion of
the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""
class Memory_Bound:
    """TMA level-2 node (Slots domain, BE/Mem area): backend slots stalled on the memory subsystem."""
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Memory share of Backend_Bound, via the Memory_Bound_Fraction helper
        # defined elsewhere in this generated file.
        try:
            self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck. Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""
class L1_Bound:
    """TMA level-3 node (Stalls domain, BE/Mem area): stalls without L1D load misses."""
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT:pp', 'MEM_LOAD_RETIRED.FB_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        # Memory stalls minus L1D-miss stalls, normalized to clocks and
        # clamped at 0 (counter skew can make the difference negative).
        try:
            self.val = max((EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3) , 0 )
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled without
loads missing the L1 data cache. The L1 data cache
typically has the shortest latency. However; in certain
cases like loads blocked on older stores; a load might
suffer due to high latency even though it is being satisfied
by the L1. Another example is loads who miss in the TLB.
These cases are characterized by execution unit stalls;
while some non-completed demand load lives in the machine
without having that demand load missing the L1 cache."""
class DTLB_Load:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): DTLB misses on load accesses."""
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        # Estimated DTLB-miss cycles (STLB hits at a fixed cost plus active
        # page walks), capped by the cycles not explained by L1D misses.
        try:
            self.val = min(Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT:c1", 4) + EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 4) , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("CYCLE_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages). This includes hitting in the second-level TLB (STLB)
as well as performing a hardware page walk on an STLB miss.."""
class Load_STLB_Hit:
    """TMA level-5 node (Clocks_Estimated domain, BE/Mem area): DTLB load misses that hit the STLB."""
    name = "Load_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        # Residual: total DTLB_Load cost minus the STLB-miss (page-walk) part.
        try:
            self.val = self.DTLB_Load.compute(EV) - self.Load_STLB_Miss.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Hit zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the (first level) DTLB was missed by load accesses, that
later on hit in second-level TLB (STLB)"""
class Load_STLB_Miss:
    """TMA level-5 node (Clocks_Calculated domain, BE/Mem area): load STLB misses (page walks)."""
    name = "Load_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        # Cycles with an active hardware page walk for loads, per clock.
        try:
            self.val = EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Miss zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles where the
Second-level TLB (STLB) was missed by load accesses,
performing a hardware page walk"""
class Store_Fwd_Blk:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): loads blocked on store forwarding."""
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        # 13 is the assumed per-occurrence penalty (cycles) of a blocked
        # store-forward on this microarchitecture.
        try:
            self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles when the
memory subsystem had loads blocked since they could not
forward data from earlier (in program order) overlapping
stores. To streamline memory operations in the pipeline; a
load can avoid waiting for memory if a prior in-flight store
is writing the data that the load wants to read (store
forwarding process). However; in some cases the load may be
blocked for a significant time pending the store forward.
For example; when the prior store is writing a smaller
region than the load is reading."""
class L1_Hit_Latency:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): latency cost of loads hitting L1D."""
    name = "L1_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        # L1-hit loads (all loads minus FB hits and L1 misses) weighted by the
        # dependent-load share, capped by cycles not explained by L1D misses.
        try:
            self.val = min(2 *(EV("MEM_INST_RETIRED.ALL_LOADS", 4) - EV("MEM_LOAD_RETIRED.FB_HIT", 4) - EV("MEM_LOAD_RETIRED.L1_MISS", 4)) * Dependent_Loads_Weight(self, EV, 4) / 100 , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("CYCLE_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Hit_Latency zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles with demand
load accesses that hit the L1 cache. The short latency of
the L1 data cache may be exposed in pointer-chasing memory
access patterns as an example."""
class Lock_Latency:
    """TMA level-4 node (Clocks domain, BE/Mem area): cache-miss cycles caused by locked operations."""
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.LOCK_LOADS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0
    def compute(self, EV):
        # Lock loads beyond L2 RFOs at a 16-cycle cost, plus the locked-store
        # fraction of L2 RFO hit cost and outstanding demand-RFO cycles.
        try:
            self.val = (16 * max(0 , EV("MEM_INST_RETIRED.LOCK_LOADS", 4) - EV("L2_RQSTS.ALL_RFO", 4)) + Mem_Lock_St_Fraction(self, EV, 4) * (Mem_L2_Store_Cost * EV("L2_RQSTS.RFO_HIT", 4) + ORO_Demand_RFO_C1(self, EV, 4))) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU spent
handling cache misses due to lock operations. Due to the
microarchitecture handling of locks; they are classified as
L1_Bound regardless of what memory source satisfied them."""
class Split_Loads:
    """TMA level-4 node (Clocks_Calculated domain, BE/Mem area): cache-line-splitting loads."""
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        # Split-register blocks (LD_BLOCKS.NO_SR) costed at the average
        # load-miss latency, normalized to clocks.
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles handling memory
load split accesses - load that cross 64-byte cache line
boundary. . Consider aligning data or hot structure fields.
See the Optimization Manual for more details"""
class G4K_Aliasing:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): 4K-address-aliased load re-issues.

    Class name carries a 'G' prefix because the metric name "4K_Aliasing"
    is not a valid Python identifier.
    """
    name = "4K_Aliasing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        # One cycle assumed per aliasing occurrence, normalized to clocks.
        try:
            self.val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "G4K_Aliasing zero division")
        return self.val
    desc = """
This metric estimates how often memory load accesses were
aliased by preceding stores (in program order) with a 4K
address offset. False match is possible; which incur a few
cycles load re-issue. However; the short re-issue duration
is often hidden by the out-of-order core and HW
optimizations; hence a user may safely ignore a high value
of this metric unless it manages to propagate up into parent
nodes of the hierarchy (e.g. to L1_Bound).. Consider
reducing independent loads/stores accesses with 4K offsets.
See the Optimization Manual for more details"""
class FB_Full:
    """TMA level-4 node (Clocks_Calculated domain, BE/Mem area): L1D fill-buffer unavailability."""
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None
    def compute(self, EV):
        # Fill-buffer-full cycles per clock; note the threshold does NOT gate
        # on the parent (flagged whenever > 0.3, per desc values can exceed 1).
        try:
            self.val = EV("L1D_PEND_MISS.FB_FULL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val
    desc = """
This metric does a *rough estimation* of how often L1D Fill
Buffer unavailability limited additional L1D miss memory
access requests to proceed. The higher the metric value; the
deeper the memory hierarchy level the misses are satisfied
from (metric values >1 are valid). Often it hints on
approaching bandwidth limits (to L2 cache; L3 cache or
external memory).. See $issueBW and $issueSL hints. Avoid
software prefetches if indeed memory BW limited."""
class L2_Bound:
    """TMA level-3 node (Stalls domain, BE/Mem area): stalls due to L2 load hits."""
    name = "L2_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L2_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        # L2-hit share of (L2 hits + fill-buffer-full periods), scaled by the
        # L2_Bound_Ratio helper defined elsewhere in this generated file.
        try:
            self.val = (LOAD_L2_HIT(self, EV, 3) / (LOAD_L2_HIT(self, EV, 3) + EV("L1D_PEND_MISS.FB_FULL_PERIODS", 3))) * L2_Bound_Ratio(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L2_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
L2 cache accesses by loads. Avoiding cache misses (i.e. L1
misses/L2 hits) can improve the latency and increase
performance."""
class L3_Bound:
    """TMA level-3 node (Stalls domain, BE/Mem area): stalls due to L3 load accesses."""
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        # L2-miss stalls minus L3-miss stalls = stalls satisfied by L3.
        try:
            self.val = (EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3) - EV("CYCLE_ACTIVITY.STALLS_L3_MISS", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
loads accesses to L3 cache or contended with a sibling Core.
Avoiding cache misses (i.e. L2 misses/L3 hits) can improve
the latency and increase performance."""
class Contested_Accesses:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): cross-core contested-access cycles."""
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        # HitM and snoop-miss loads costed at their latency above an L2 hit,
        # scaled by the fill-buffer factor, normalized to clocks.
        try:
            self.val = ((Mem_XSNP_HitM_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HITM(self, EV, 4) + (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_MISS(self, EV, 4)) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to contested
accesses. Contested accesses occur when data written by one
Logical Processor are read by another Logical Processor on a
different Physical Core. Examples of contested accesses
include synchronizations such as locks; true data sharing
such as modified locked variables; and false sharing."""
class Data_Sharing:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): cross-core data-sharing cycles."""
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        # Clean snoop-hit loads costed above an L2 hit, scaled by the
        # fill-buffer factor, normalized to clocks.
        try:
            self.val = (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HIT(self, EV, 4) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to data-sharing
accesses. Data shared by multiple Logical Processors (even
just read shared) may cause increased access latency due to
cache coherency. Excessive data sharing can drastically harm
multithreaded performance."""
class L3_Hit_Latency:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): latency cost of L3 load hits."""
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        # L3 hits costed at their latency above an L2 hit, normalized to clocks.
        try:
            self.val = (Mem_XSNP_None_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles with demand load
accesses that hit the L3 cache under unloaded scenarios
(possibly L3 latency limited). Avoiding private cache
misses (i.e. L2 misses/L3 hits) will improve the latency;
reduce contention with sibling physical cores and increase
performance. Note the value of this node may overlap with
its siblings."""
class SQ_Full:
    """TMA level-4 node (Clocks domain, BE/Mem area): Super Queue full cycles."""
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        # Cycles the L2 queue stalled L1D misses, normalized to clocks.
        try:
            self.val = EV("L1D_PEND_MISS.L2_STALL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """
This metric measures fraction of cycles where the Super
Queue (SQ) was full taking into account all request-types
and both hardware SMT threads (Logical Processors)."""
class DRAM_Bound:
    """TMA level-3 node (Stalls domain, BE/Mem area): stalls on external memory (DRAM) loads."""
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        # Delegates entirely to the MEM_Bound_Ratio helper defined elsewhere
        # in this generated file.
        try:
            self.val = MEM_Bound_Ratio(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled on
accesses to external memory (DRAM) by loads. Better caching
can improve the latency and increase performance."""
class MEM_Bandwidth:
    """TMA level-4 node (Clocks domain, BE/Mem area): cycles limited by external-memory bandwidth."""
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        # Cycles with many outstanding demand reads (bandwidth heuristic),
        # via the ORO_DRD_BW_Cycles helper, normalized to clocks.
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the core's
performance was likely hurt due to approaching bandwidth
limits of external memory - DRAM ([SPR-HBM] and/or HBM).
The underlying heuristic assumes that a similar off-core
traffic is generated by all IA cores. This metric does not
aggregate non-data-read requests by this logical processor;
requests from other IA Logical Processors/Physical
Cores/sockets; or other non-IA devices like GPU; hence the
maximum external memory bandwidth limits may or may not be
approached when this metric is flagged (see Uncore counters
for that).. Improve data accesses to reduce cacheline
transfers from/to memory. Examples: 1) Consume all bytes of
a each cacheline before it is evicted (e.g. reorder
structure elements and split non-hot ones), 2) merge
computed-limited with BW-limited loops, 3) NUMA
optimizations in multi-socket system. Note: software
prefetches will not help BW-limited application.."""
class MEM_Latency:
    """TMA level-4 node (Clocks domain, BE/Mem area): cycles limited by external-memory latency."""
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        # Any outstanding-demand-read cycles minus the bandwidth-attributed
        # part (the sibling MEM_Bandwidth node's value).
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the
performance was likely hurt due to latency from external
memory - DRAM ([SPR-HBM] and/or HBM). This metric does not
aggregate requests from other Logical Processors/Physical
Cores/sockets (see Uncore counters for that).. Improve data
accesses or interleave them with compute. Examples: 1) Data
layout re-structuring, 2) Software Prefetches (also through
the compiler).."""
class Store_Bound:
    """TMA level-3 node (Stalls domain, BE/Mem area): stalls on RFO store accesses."""
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_INST_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        # Cycles execution was bound on stores, normalized to clocks.
        try:
            self.val = EV("EXE_ACTIVITY.BOUND_ON_STORES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to RFO
store memory accesses; RFO store issue a read-for-ownership
request before the write. Even though store accesses do not
typically stall out-of-order CPUs; there are few cases where
stores can lead to actual stalls. This metric will be
flagged should RFO stores be a bottleneck."""
class Store_Latency:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): cycles handling L1D store misses."""
    name = "Store_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        # L2 store-hit cycles plus the non-locked share of outstanding
        # demand-RFO cycles, normalized to clocks.
        try:
            self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU spent
handling L1D store misses. Store accesses usually less
impact out-of-order core performance; however; holding
resources for longer time can lead into undesired
implications (e.g. contention on L1D fill-buffer entries -
see FB_Full). Consider to avoid/reduce unnecessary (or
easily load-able/computable) memory store."""
class False_Sharing:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): false-sharing synchronization cycles."""
    name = "False_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        # RFOs that snooped a modified line in another core, costed at the
        # cross-snoop HitM latency, normalized to clocks.
        try:
            self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "False_Sharing zero division")
        return self.val
    desc = """
This metric roughly estimates how often CPU was handling
synchronizations due to False Sharing. False Sharing is a
multithreading hiccup; where multiple Logical Processors
contend on different data-elements mapped into the same
cache line. . False Sharing can be easily avoided by padding
to make Logical Processors access different lines."""
class Split_Stores:
    """TMA level-4 node (Core_Utilization domain, BE/Mem area): rate of cache-line-splitting stores."""
    name = "Split_Stores"
    domain = "Core_Utilization"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        # Normalized to CORE clocks (not thread clocks) — per-core utilization domain.
        try:
            self.val = EV("MEM_INST_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Stores zero division")
        return self.val
    desc = """
This metric represents rate of split store accesses.
Consider aligning your data to the 64-byte cache line
granularity."""
class Streaming_Stores:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): streaming (non-temporal) store stalls."""
    name = "Streaming_Stores"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.STREAMING_WR.ANY_RESPONSE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBW', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        # 9 cycles assumed per streaming-write response; only evaluated when
        # the global DS flag is set, otherwise the node reports 0.
        try:
            self.val = 9 * EV("OCR.STREAMING_WR.ANY_RESPONSE", 4) / CLKS(self, EV, 4) if DS else 0
            # Bare EV() reference below appears intended to keep the event
            # registered for collection even when DS is false (EV has
            # framework side effects) — do not remove.
            EV("OCR.STREAMING_WR.ANY_RESPONSE", 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Streaming_Stores zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to
Streaming store memory accesses; Streaming store optimize
out a read request required by RFO stores. Even though store
accesses do not typically stall out-of-order CPUs; there are
few cases where stores can lead to actual stalls. This
metric will be flagged should Streaming stores be a
bottleneck."""
class DTLB_Store:
    """TMA level-4 node (Clocks_Estimated domain, BE/Mem area): DTLB misses on store accesses."""
    name = "DTLB_Store"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        # STLB hits at a fixed cost plus active store page walks, normalized
        # to CORE clocks.
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT:c1", 4) + EV("DTLB_STORE_MISSES.WALK_ACTIVE", 4)) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Store zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles spent
handling first-level data TLB store misses. As with
ordinary data caching; focus on improving data locality and
reducing working-set size to reduce DTLB overhead.
Additionally; consider using profile-guided optimization
(PGO) to collocate frequently-used data on the same page.
Try using larger page sizes for large amounts of frequently-
used data."""
class Store_STLB_Hit:
    """TMA level-5 node (Clocks_Estimated domain, BE/Mem area): DTLB store misses that hit the STLB."""
    name = "Store_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        # Residual: total DTLB_Store cost minus the STLB-miss (page-walk) part.
        try:
            self.val = self.DTLB_Store.compute(EV) - self.Store_STLB_Miss.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Hit zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the TLB was missed by store accesses, hitting in the second-
level TLB (STLB)"""
class Store_STLB_Miss:
    """TMA level-5 node (Clocks_Calculated domain, BE/Mem area): store STLB misses (page walks)."""
    name = "Store_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        # Cycles with an active hardware page walk for stores, per CORE clock.
        try:
            self.val = EV("DTLB_STORE_MISSES.WALK_ACTIVE", 5) / CORE_CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Miss zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles where the STLB
was missed by store accesses, performing a hardware page
walk"""
class Core_Bound:
    """TMA level-2 node (Slots domain, BE/Core area): backend slots bound by non-memory core issues."""
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        # Core-bound = Backend_Bound minus its memory part, clamped at 0.
        try:
            self.val = max(0 , self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots where Core non-
memory issues were of a bottleneck. Shortage in hardware
compute resources; or dependencies in software's
instructions are both categorized under Core Bound. Hence it
may indicate the machine ran out of an out-of-order
resource; certain execution units are overloaded or
dependencies in program's data- or instruction-flow are
limiting the performance (e.g. FP-chained long-latency
arithmetic operations).. Tip: consider Port Saturation
analysis as next step."""
class Divider:
    """TMA level-3 node (Clocks domain, BE/Core area): cycles the divider unit was active."""
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['ARITH.DIVIDER_ACTIVE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        # Divider-active cycles per clock.
        try:
            self.val = EV("ARITH.DIVIDER_ACTIVE", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the Divider
unit was active. Divide and square root instructions are
performed by the Divider unit and can take considerably
longer latency than integer or Floating Point addition;
subtraction; or multiplication."""
class Serializing_Operation:
    """TMA level-3 node (Clocks domain, BE/Core area): issue stalls from serializing operations."""
    name = "Serializing_Operation"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['RESOURCE_STALLS.SCOREBOARD']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        # Scoreboard resource stalls per clock.
        try:
            self.val = EV("RESOURCE_STALLS.SCOREBOARD", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Serializing_Operation zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU issue-
pipeline was stalled due to serializing operations.
Instructions like CPUID; WRMSR or LFENCE serialize the out-
of-order execution which may limit performance."""
class Slow_Pause:
    """TMA level-4 node (Clocks domain, BE/Core area): cycles stalled on PAUSE instructions."""
    name = "Slow_Pause"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['MISC_RETIRED.PAUSE_INST']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        # 140 is the assumed per-PAUSE cost (cycles) on this microarchitecture.
        try:
            self.val = 140 * EV("MISC_RETIRED.PAUSE_INST", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Slow_Pause zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to PAUSE Instructions."""
class Ports_Utilization:
    """TMA level-3 node (Clocks domain, BE/Core area): non-divider core compute limitation."""
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        # When divider activity is smaller than non-memory stalls, use the
        # Core_Bound_Cycles formula; otherwise fall back to the
        # few-uops-executed heuristic.
        try:
            self.val = Core_Bound_Cycles(self, EV, 3) / CLKS(self, EV, 3) if (EV("ARITH.DIVIDER_ACTIVE", 3)<(EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) - EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3))) else Few_Uops_Executed_Threshold(self, EV, 3) / CLKS(self, EV, 3)
            # Bare EV() references below appear intended to keep these events
            # registered for collection regardless of which conditional branch
            # was taken (EV has framework side effects) — do not remove.
            EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3)
            EV("ARITH.DIVIDER_ACTIVE", 3)
            EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU performance
was potentially limited due to Core computation issues (non
divider-related). Two distinct categories can be attributed
into this metric: (1) heavy data-dependency among contiguous
instructions would manifest in this metric - such cases are
often referred to as low Instruction Level Parallelism
(ILP). (2) Contention on some hardware execution unit other
than Divider. For example; when there are too many multiply
operations.. Loop Vectorization -most compilers feature
auto-Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
class Ports_Utilized_0:
    """TMA level-4 node (Clocks domain, BE/Core area): cycles with zero uops executed on any port."""
    name = "Ports_Utilized_0"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        # The :u0x80 umask modifier selects the "0 ports utilized" flavor of
        # the EXE_ACTIVITY.3_PORTS_UTIL event.
        try:
            self.val = EV("EXE_ACTIVITY.3_PORTS_UTIL:u0x80", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_0 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed no
uops on any execution port (Logical Processor cycles since
ICL, Physical Core cycles otherwise). Long-latency
instructions like divides may contribute to this metric..
Check assembly view and Appendix C in Optimization Manual to
find out instructions with say 5 or more cycles latency..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Mixing_Vectors:
    """TMA tree node (level 5): share of issued uops that are blend uops
    injected to bridge mismatched vector widths.

    compute() stores the ratio in self.val, a significance flag in
    self.thresh, and returns self.val.
    """
    name = "Mixing_Vectors"
    domain = "Clocks"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Penalty ratio: width-mismatch blend uops out of all issued uops.
            mismatch_uops = EV("UOPS_ISSUED.VECTOR_WIDTH_MISMATCH", 5)
            issued_uops = EV("UOPS_ISSUED.ANY", 5)
            self.val = mismatch_uops / issued_uops
            self.thresh = self.val > 0.05
        except ZeroDivisionError:
            handle_error(self, "Mixing_Vectors zero division")
        return self.val
    desc = """
This metric estimates penalty in terms of percentage of
injected blend uops out of all Uops Issued -- the Count
Domain. Usually a Mixing_Vectors over 5% is worth
investigating. Read more in Appendix B1 of the Optimizations
Guide for this topic."""
class Ports_Utilized_1:
    """TMA tree node (level 4): fraction of cycles in which exactly one uop
    executed across all ports; compute() fills self.val / self.thresh."""
    name = "Ports_Utilized_1"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.1_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.1_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_1 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the CPU
executed total of 1 uop per cycle on all execution ports
(Logical Processor cycles since ICL, Physical Core cycles
otherwise). This can be due to heavy data-dependency among
software instructions; or over oversubscribing a particular
hardware resource. In some other cases with high
1_Port_Utilized and L1_Bound; this metric can point to L1
data-cache latency bottleneck that may not necessarily
manifest with complete execution starvation (due to the
short L1 latency e.g. walking a linked list) - looking at
the assembly can be helpful."""
class Ports_Utilized_2:
    """TMA tree node (level 4): fraction of cycles in which exactly two uops
    executed across all ports; compute() fills self.val / self.thresh."""
    name = "Ports_Utilized_2"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.2_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.2_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_2 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total
of 2 uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise).
Loop Vectorization -most compilers feature auto-
Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
class Ports_Utilized_3m:
    """TMA tree node (level 4): fraction of cycles in which three or more
    uops executed across all ports; compute() fills self.val / self.thresh."""
    name = "Ports_Utilized_3m"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['UOPS_EXECUTED.CYCLES_GE_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_EXECUTED.CYCLES_GE_3", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.4) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_3m zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total
of 3 or more uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise)."""
class ALU_Op_Utilization:
    """TMA tree node (level 5): ALU-port (0/1/5/6) dispatch utilization,
    normalized by the 4 ALU ports per core clock."""
    name = "ALU_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        # Sum of uops dispatched to the four ALU-capable ports, over 4 ports * core clocks.
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_0", 5) + EV("UOPS_DISPATCHED.PORT_1", 5) + EV("UOPS_DISPATCHED.PORT_5", 5) + EV("UOPS_DISPATCHED.PORT_6", 5)) / (4 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.4)
        except ZeroDivisionError:
            handle_error(self, "ALU_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution ports for ALU operations."""
class Port_0:
    """TMA tree node (level 6): dispatch utilization of execution port 0
    (ALU and 2nd branch), per core clock."""
    name = "Port_0"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_0']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_0", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_0 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 0 ALU and 2nd branch"""
class Port_1:
    """TMA tree node (level 6): dispatch utilization of execution port 1
    (ALU), per core clock."""
    name = "Port_1"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_1']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_1", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_1 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 1 (ALU)"""
class Port_5:
    """TMA tree node (level 6): dispatch utilization of execution port 5
    (ALU), per core clock."""
    name = "Port_5"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_5']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_5", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_5 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 5 ALU. See section
'Handling Port 5 Pressure' in Optimization Manual:.
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Port_6:
    """TMA tree node (level 6): dispatch utilization of execution port 6
    (primary branch and simple ALU), per core clock."""
    name = "Port_6"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_6']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_6", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_6 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 6 Primary Branch and
simple ALU"""
class Load_Op_Utilization:
    """TMA tree node (level 5): load-port dispatch utilization, normalized
    by the 2 load ports (2 and 3) per core clock."""
    name = "Load_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_2_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_2_3", 5) / (2 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Load_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Load operations"""
class Store_Op_Utilization:
    """TMA tree node (level 5): store-port dispatch utilization across the
    store-data (4/9) and store-address (7/8) ports, per 4 ports * core clock."""
    name = "Store_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_7_8']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_4_9", 5) + EV("UOPS_DISPATCHED.PORT_7_8", 5)) / (4 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Store_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Store operations"""
class Retiring:
    """TMA level-1 category: fraction of pipeline slots doing useful work
    (issued uops that eventually retire); compute() fills self.val / self.thresh."""
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            # Fixed-counter topdown path (PERF_METRICS / TOPDOWN.SLOTS, renormalized by
            # the metrics sum) when topdown_use_fixed is set; else the event-based formula.
            self.val = (EV("PERF_METRICS.RETIRING", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) if topdown_use_fixed else EV("UOPS_RETIRED.SLOTS", 1) / SLOTS(self, EV, 1)
            # Also flagged when the Heavy_Operations child is already significant.
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """
This category represents fraction of slots utilized by
useful work i.e. issued uops that eventually get retired.
Ideally; all pipeline slots would be attributed to the
Retiring category. Retiring of 100% would indicate the
maximum Pipeline_Width throughput was achieved. Maximizing
Retiring typically increases the Instructions-per-cycle (see
IPC metric). Note that a high Retiring value does not
necessary mean there is no room for more performance. For
example; Heavy-operations or Microcode Assists are
categorized under Retiring. They often indicate suboptimal
performance and can often be optimized or avoided. . A high
Retiring value for non-vectorized code may be a good hint
for programmer to consider vectorizing his code. Doing so
essentially lets more computations be done without
significantly increasing number of instructions thus
improving the performance."""
class Light_Operations:
    """TMA level-2 node: slots retiring light-weight (single-uop)
    operations, derived as Retiring minus Heavy_Operations, clamped at 0.

    compute() stores the value in self.val, a significance flag in
    self.thresh, and returns self.val.
    """
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # Whatever Retiring covers beyond heavy operations is "light";
            # negative residue (measurement skew) is clamped to zero.
            residue = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV)
            self.val = max(0, residue)
            self.thresh = self.val > 0.6
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring light-weight operations -- instructions that
require no more than one uop (micro-operation). This
correlates with total number of instructions used by the
program. A uops-per-instruction (see UopPI metric) ratio of
1 or less should be expected for decently optimized code
running on Intel Core/Xeon products. While this often
indicates efficient X86 instructions were executed; high
value does not necessarily mean better performance cannot be
achieved. Note this may undercount due to approximation
using indirect events. Focus on techniques that reduce
instruction count or result in more efficient instructions
generation such as vectorization."""
class FP_Arith:
    """TMA level-3 node: overall arithmetic floating-point uop fraction,
    the sum of the X87_Use, FP_Scalar and FP_Vector child nodes.

    compute() stores the value in self.val, a significance flag in
    self.thresh, and returns self.val.
    """
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            # Aggregate the three FP flavors; children are evaluated left-to-right.
            children = (self.X87_Use, self.FP_Scalar, self.FP_Vector)
            self.val = sum(node.compute(EV) for node in children)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """
This metric represents overall arithmetic floating-point
(FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use of
\"Uops\" CountDomain and FMA double-counting."""
class X87_Use:
    """TMA level-4 node: approximation of legacy x87 usage -- the Retiring
    fraction scaled by the executed x87-uop ratio.

    compute() stores the value in self.val, a significance flag in
    self.thresh, and returns self.val.
    """
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            retiring_frac = self.Retiring.compute(EV)
            x87_uops = EV("UOPS_EXECUTED.X87", 4)
            all_uops = EV("UOPS_EXECUTED.THREAD", 4)
            self.val = retiring_frac * x87_uops / all_uops
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """
This metric serves as an approximation of legacy x87 usage.
It accounts for instructions beyond X87 FP arithmetic
operations; hence may be used as a thermometer to avoid X87
high usage and preferably upgrade to modern ISA. See Tip
under Tuning Hint.. Tip: consider compiler flags to generate
newer AVX (or SSE) instruction sets; which typically perform
better and feature vectors."""
class FP_Scalar:
    """TMA tree node (level 4): scalar FP arithmetic uop fraction of retired
    slots; compute() fills self.val / self.thresh."""
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
scalar uops fraction the CPU has retired. May overcount due
to FMA double counting.. Investigate what limits (compiler)
generation of vector code."""
class FP_Vector:
    """TMA tree node (level 4): vector FP arithmetic uop fraction of retired
    slots, aggregated over all vector widths."""
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
vector uops fraction the CPU has retired aggregated across
all vector widths. May overcount due to FMA double
counting.. Check if vector width is expected"""
class FP_Vector_128b:
    """TMA tree node (level 5): 128-bit packed FP (single + double) uop
    fraction of retired slots."""
    name = "FP_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_128b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 128-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class FP_Vector_256b:
    """TMA tree node (level 5): 256-bit packed FP (single + double) uop
    fraction of retired slots."""
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class FP_Vector_512b:
    """TMA tree node (level 5): 512-bit packed FP (single + double) uop
    fraction of retired slots."""
    name = "FP_Vector_512b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_512b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 512-bit wide vectors. May overcount
due to FMA double counting."""
class Memory_Operations:
    """TMA level-3 node: slots retiring memory (load/store) operations --
    the Light_Operations fraction scaled by the memory-instruction ratio.

    compute() stores the value in self.val, a significance flag in
    self.thresh, and returns self.val.
    """
    name = "Memory_Operations"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            light_frac = self.Light_Operations.compute(EV)
            mem_insts = EV("MEM_INST_RETIRED.ANY", 3)
            all_insts = EV("INST_RETIRED.ANY", 3)
            self.val = light_frac * mem_insts / all_insts
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring memory operations -- uops for memory load or store
accesses."""
class Branch_Instructions:
    """TMA tree node (level 3): slots retiring branch instructions --
    Light_Operations scaled by the retired-branch ratio over retired slots."""
    name = "Branch_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("BR_INST_RETIRED.ALL_BRANCHES", 3) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring branch instructions."""
class Other_Light_Ops:
    """TMA tree node (level 3): light uops not covered by the sibling nodes
    (Light_Operations minus their sum, clamped at 0)."""
    name = "Other_Light_Ops"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Light_Operations.compute(EV) - Light_Ops_Sum(self, EV, 3))
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Light_Ops zero division")
        return self.val
    desc = """
This metric represents the remaining light uops fraction the
CPU has executed - remaining means not covered by other
sibling nodes. May undercount due to FMA double counting"""
class Nop_Instructions:
    """TMA tree node (level 4): slots retiring NOP instructions --
    Light_Operations scaled by the retired-NOP ratio over retired slots."""
    name = "Nop_Instructions"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = ['INST_RETIRED.NOP']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.NOP", 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Nop_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring NOP (no op) instructions. Compilers often use NOPs
for certain address alignments - e.g. start address of a
function or loop body.. Improve Codegen by correctly placing
NOPs outside hot sections (e.g. outside loop body)."""
class Heavy_Operations:
    """TMA level-2 node: slots retiring heavy-weight (multi-uop or
    microcoded) operations -- Microcode_Sequencer plus the Retiring
    fraction scaled by the multi-uop decoder-0 ratio.

    compute() stores the value in self.val, a significance flag in
    self.thresh, and returns self.val.
    """
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            ms_frac = self.Microcode_Sequencer.compute(EV)
            retiring_frac = self.Retiring.compute(EV)
            # DEC0 total minus its cmask=1 count leaves instructions decoder 0
            # spent more than one uop on, normalized by all MITE uops.
            multi_uop_dec0 = EV("UOPS_DECODED.DEC0", 2) - EV("UOPS_DECODED.DEC0:c1", 2)
            self.val = ms_frac + retiring_frac * multi_uop_dec0 / EV("IDQ.MITE_UOPS", 2)
            self.thresh = self.val > 0.1
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that
require two or more uops or micro-coded sequences. This
highly-correlates with the uop length of these
instructions/sequences. Note this may overcount due to
approximation using indirect events"""
class Few_Uops_Instructions:
    """TMA tree node (level 3): slots retiring multi-uop (but not
    microcoded) instructions -- Heavy_Operations minus Microcode_Sequencer.

    compute() stores the value in self.val, a significance flag in
    self.thresh, and returns self.val.
    """
    name = "Few_Uops_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Heavy_Operations.compute(EV) - self.Microcode_Sequencer.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Few_Uops_Instructions zero division")
        return self.val
    # Fixed garbled help text: "that that are decoder into" -> "that are decoded into".
    desc = """
This metric represents fraction of slots where the CPU was
retiring instructions that are decoded into two or up
to four uops. This highly-correlates with the number of
uops in such instructions."""
class Microcode_Sequencer:
    """TMA tree node (level 3): slots retiring uops fetched by the
    Microcode Sequencer (MS) unit."""
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = ['IDQ.MS_UOPS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None
    def compute(self, EV):
        # MS-delivered uops scaled by the retire fraction, per total slots.
        try:
            self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was
retiring uops fetched by the Microcode Sequencer (MS) unit.
The MS is used for CISC instructions not supported by the
default decoders (like repeat move strings; or CPUID); or by
microcode assists used to address some operation modes (like
in Floating Point assists). These cases can often be
avoided.."""
class Assists:
    """TMA tree node (level 4): estimated slot cost of microcode assists --
    assist count times the average assist cost, per total slots."""
    name = "Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 4
    htoff = False
    sample = ['ASSISTS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Avg_Assist_Cost * EV("ASSISTS.ANY", 4) / SLOTS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops
delivered by the Microcode_Sequencer as a result of Assists.
Assists are long sequences of uops that are required in
certain corner-cases for operations that cannot be handled
natively by the execution pipeline. For example; when
working with very small floating point values (so-called
Denormals); the FP units are not set up to perform these
operations natively. Instead; a sequence of instructions to
perform the computation on the Denormals is injected into
the pipeline. Since these microcode sequences might be
dozens of uops long; Assists can be extremely deleterious to
performance and they can be avoided in many cases."""
class FP_Assists:
    """TMA tree node (level 5): estimated slot cost of floating-point
    assists, using a fixed cost of 34 slots per FP assist."""
    name = "FP_Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 34 * EV("ASSISTS.FP", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "FP_Assists zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of slots the CPU
retired uops as a result of handing Floating Point (FP)
Assists. FP Assist may apply when working with very small
floating point values (so-called Denormals).. Consider DAZ
(Denormals Are Zero) and/or FTZ (Flush To Zero) options in
your compiler; \"-ffast-math\" with -O2 in GCC for example.
This option may improve performance if the denormal values
are not critical in your application. Also note that the DAZ
and FTZ modes are not compatible with the IEEE Standard
754.. https://www.intel.com/content/www/us/en/develop/docume
ntation/vtune-help/top/reference/cpu-metrics-reference/bad-
speculation-back-end-bound-pipeline-slots/fp-assists.html"""
class CISC:
    """TMA level-4 node: Microcode_Sequencer slots not explained by
    Assists (multi-uop CISC instructions), clamped at 0.

    compute() stores the value in self.val, a significance flag in
    self.thresh, and returns self.val.
    """
    name = "CISC"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # MS slots left over after subtracting the assist estimate;
            # negative residue is clamped to zero.
            residue = self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV)
            self.val = max(0, residue)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "CISC zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU retired
uops originated from CISC (complex instruction set computer)
instruction. A CISC instruction has multiple uops that are
required to perform the instruction's functionality as in
the case of read-modify-write as an example. Since these
instructions require multiple uops they may or may not imply
sub-optimal use of machine resources."""
class Metric_Mispredictions:
    """Standalone bottleneck metric: delegates to the module-level
    Mispredictions() helper; compute() sets self.val/self.thresh (no return)."""
    name = "Mispredictions"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts', 'BvMP'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Mispredictions(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Mispredictions zero division")
    desc = """
Total pipeline cost of Branch Misprediction related
bottlenecks"""
class Metric_Big_Code:
    """Standalone bottleneck metric: delegates to the module-level
    Big_Code() helper; compute() sets self.val/self.thresh (no return)."""
    name = "Big_Code"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBC', 'BigFootprint', 'Fed', 'Frontend', 'IcMiss', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Big_Code(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Big_Code zero division")
    desc = """
Total pipeline cost of instruction fetch related bottlenecks
by large code footprint programs (i-side cache; TLB and BTB
misses)"""
class Metric_Instruction_Fetch_BW:
    """Standalone bottleneck metric: delegates to the module-level
    Instruction_Fetch_BW() helper; compute() sets self.val/self.thresh."""
    name = "Instruction_Fetch_BW"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvFB', 'Fed', 'FetchBW', 'Frontend'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instruction_Fetch_BW(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Instruction_Fetch_BW zero division")
    desc = """
Total pipeline cost of instruction fetch bandwidth related
bottlenecks (when the front-end could not sustain operations
delivery to the back-end)"""
class Metric_Cache_Memory_Bandwidth:
    """Standalone bottleneck metric: delegates to the module-level
    Cache_Memory_Bandwidth() helper; compute() sets self.val/self.thresh."""
    name = "Cache_Memory_Bandwidth"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMB', 'Mem', 'MemoryBW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cache_Memory_Bandwidth(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Bandwidth zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Bandwidth
related bottlenecks"""
class Metric_Cache_Memory_Latency:
    """Standalone bottleneck metric: delegates to the module-level
    Cache_Memory_Latency() helper; compute() sets self.val/self.thresh."""
    name = "Cache_Memory_Latency"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvML', 'Mem', 'MemoryLat', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cache_Memory_Latency(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Latency zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Latency
related bottlenecks"""
class Metric_Memory_Data_TLBs:
    """Standalone bottleneck metric: delegates to the module-level
    Memory_Data_TLBs() helper; compute() sets self.val/self.thresh."""
    name = "Memory_Data_TLBs"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMT', 'Mem', 'MemoryTLB', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Memory_Data_TLBs(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Data_TLBs zero division")
    desc = """
Total pipeline cost of Memory Address Translation related
bottlenecks (data-side TLBs)"""
class Metric_Memory_Synchronization:
    """Standalone bottleneck metric: delegates to the module-level
    Memory_Synchronization() helper; compute() sets self.val/self.thresh."""
    name = "Memory_Synchronization"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMS', 'Mem', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Memory_Synchronization(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Synchronization zero division")
    desc = """
Total pipeline cost of Memory Synchronization related
bottlenecks (data transfers and coherency updates across
processors)"""
class Metric_Compute_Bound_Est:
    """Standalone bottleneck metric: delegates to the module-level
    Compute_Bound_Est() helper; compute() sets self.val/self.thresh."""
    name = "Compute_Bound_Est"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvCB', 'Cor'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Compute_Bound_Est(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Compute_Bound_Est zero division")
    desc = """
Total pipeline cost when the execution is compute-bound - an
estimation. Covers Core Bound when High ILP as well as when
long-latency execution units are busy."""
class Metric_Irregular_Overhead:
    """Standalone bottleneck metric: delegates to the module-level
    Irregular_Overhead() helper; compute() sets self.val/self.thresh."""
    name = "Irregular_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BvIO', 'Cor', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Irregular_Overhead(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Irregular_Overhead zero division")
    desc = """
Total pipeline cost of irregular execution (e.g. FP-assists
in HPC, Wait time with work imbalance multithreaded
workloads, overhead in system services or virtualized
environments)"""
class Metric_Other_Bottlenecks:
    """Standalone bottleneck metric: delegates to the module-level
    Other_Bottlenecks() helper; compute() sets self.val/self.thresh."""
    name = "Other_Bottlenecks"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvOB', 'Cor', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Other_Bottlenecks(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Other_Bottlenecks zero division")
    desc = """
Total pipeline cost of remaining bottlenecks in the back-
end. Examples include data-dependencies (Core Bound when Low
ILP) and other unlisted memory-related stalls."""
class Metric_Branching_Overhead:
    """Standalone bottleneck metric: delegates to the module-level
    Branching_Overhead() helper; compute() sets self.val/self.thresh."""
    name = "Branching_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBO', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Branching_Overhead(self, EV, 0)
            self.thresh = (self.val > 5)
        except ZeroDivisionError:
            handle_error_metric(self, "Branching_Overhead zero division")
    desc = """
Total pipeline cost of instructions used for program
control-flow - a subset of the Retiring category in TMA.
Examples include function calls; loops and alignments. (A
lower bound). Consider Loop Unrolling or function inlining
optimizations"""
class Metric_Useful_Work:
    """Standalone bottleneck metric: delegates to the module-level
    Useful_Work() helper; compute() sets self.val/self.thresh."""
    name = "Useful_Work"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvUW', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Useful_Work(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Useful_Work zero division")
    desc = """
Total pipeline cost of \"useful operations\" - the portion
of Retiring category not covered by Branching_Overhead nor
Irregular_Overhead."""
class Metric_Core_Bound_Likely:
    """Standalone info metric: delegates to the module-level
    Core_Bound_Likely() helper; compute() sets self.val/self.thresh."""
    name = "Core_Bound_Likely"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Botlnk.L0"
    metricgroup = frozenset(['Cor', 'SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Core_Bound_Likely(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Bound_Likely zero division")
    desc = """
Probability of Core Bound bottleneck hidden by SMT-profiling
artifacts. Tip: consider analysis with SMT disabled"""
class Metric_IPC:
    """Standalone info metric: Instructions Per Cycle, via the module-level
    IPC() helper; compute() sets self.val/self.thresh (always shown)."""
    name = "IPC"
    domain = "Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Ret', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IPC zero division")
    desc = """
Instructions Per Cycle (per Logical Processor)"""
class Metric_UopPI:
    """Standalone info metric: uops per instruction, via the module-level
    UopPI() helper; compute() sets self.val/self.thresh."""
    name = "UopPI"
    domain = "Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UopPI(self, EV, 0)
            self.thresh = (self.val > 1.05)
        except ZeroDivisionError:
            handle_error_metric(self, "UopPI zero division")
    desc = """
Uops Per Instruction"""
class Metric_UpTB:
    """Standalone info metric: uops per taken branch, via the module-level
    UpTB() helper; flagged when below 1.5x the pipeline width."""
    name = "UpTB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 1.5
        except ZeroDivisionError:
            handle_error_metric(self, "UpTB zero division")
    desc = """
Uops per taken branch"""
class Metric_CPI:
    """Standalone info metric: Cycles Per Instruction, via the module-level
    CPI() helper; compute() sets self.val/self.thresh (always shown)."""
    name = "CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPI zero division")
    desc = """
Cycles Per Instruction (per Logical Processor)"""
class Metric_CLKS:
    """Standalone info count: per-logical-processor active clockticks, via
    the module-level CLKS() helper; compute() sets self.val/self.thresh."""
    name = "CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CLKS zero division")
    desc = """
Per-Logical Processor actual clocks when the Logical
Processor is active."""
# Generated count metric (Info.Thread): total issue-pipeline slots (TMA level-1 denominator).
class Metric_SLOTS:
    name = "SLOTS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SLOTS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SLOTS zero division")
    desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""
# Generated info metric (Info.Thread): fraction (0..1) of core issue slots used by this thread.
class Metric_Slots_Utilization:
    name = "Slots_Utilization"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Slots_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Slots_Utilization zero division")
    desc = """
Fraction of Physical Core issue-slots utilized by this
Logical Processor"""
# Generated info metric (Info.Thread): ratio of executed to issued uops.
class Metric_Execute_per_Issue:
    name = "Execute_per_Issue"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Cor', 'Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute_per_Issue(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute_per_Issue zero division")
    desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests
high rate of uop micro-fusions. Ratio < 1 suggest high rate
of \"execute\" at rename stage."""
# Generated core-level metric (Info.Core): IPC summed across hyper-threads.
class Metric_CoreIPC:
    name = "CoreIPC"
    domain = "Core_Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CoreIPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CoreIPC zero division")
    desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""
# Generated core-level metric (Info.Core): floating-point operations per cycle.
class Metric_FLOPc:
    name = "FLOPc"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'Flops'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FLOPc(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FLOPc zero division")
    desc = """
Floating Point Operations Per Cycle"""
# Generated core-level metric (Info.Core): per-core FP execution-unit usage;
# maxval 2.0 because FMA double-counting can push the value above 1.
class Metric_FP_Arith_Utilization:
    name = "FP_Arith_Utilization"
    domain = "Core_Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FP_Arith_Utilization zero division")
    desc = """
Actual per-core usage of the Floating Point non-X87
execution units (regardless of precision or vector-width).
Values > 1 are possible due to Fused-Multiply Add FMA
counting - common."""
# Generated info metric (Info.Core): instruction-level parallelism, capped at
# the number of execution ports (Exe_Ports module constant).
class Metric_ILP:
    name = "ILP"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = ILP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "ILP zero division")
    desc = """
Instruction-Level-Parallelism (average number of uops
executed when there is execution) per thread (logical-
processor)"""
# Generated info metric (Info.Core): uops executed per cycle.
class Metric_EPC:
    name = "EPC"
    domain = "Metric"
    maxval = 20.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = EPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "EPC zero division")
    desc = """
uops Executed per Cycle"""
# Generated count metric (Info.Core): core clocks while any sibling thread is active.
class Metric_CORE_CLKS:
    name = "CORE_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CORE_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CORE_CLKS zero division")
    desc = """
Core actual clocks when any Logical Processor is active on
the Physical Core"""
# Generated per-instruction metric (Info.Inst_Mix): instructions per load;
# flagged when < 3 (i.e. load-heavy code).
class Metric_IpLoad:
    name = "IpLoad"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpLoad(self, EV, 0)
            self.thresh = (self.val < 3)
        except ZeroDivisionError:
            handle_error_metric(self, "IpLoad zero division")
    desc = """
Instructions per Load (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
# Generated per-instruction metric (Info.Inst_Mix): instructions per store; flagged when < 8.
class Metric_IpStore:
    name = "IpStore"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpStore(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpStore zero division")
    desc = """
Instructions per Store (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
# Generated per-instruction metric (Info.Inst_Mix): instructions per branch; flagged when < 8.
class Metric_IpBranch:
    name = "IpBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpBranch(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpBranch zero division")
    desc = """
Instructions per Branch (lower number means higher
occurrence rate)"""
# Generated per-instruction metric (Info.Inst_Mix): instructions per near call; flagged when < 200.
class Metric_IpCall:
    name = "IpCall"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpCall(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpCall zero division")
    desc = """
Instructions per (near) call (lower number means higher
occurrence rate)"""
# Generated per-instruction metric (Info.Inst_Mix): instructions per taken
# branch; flagged when below 2 * issue width + 1 (Pipeline_Width constant).
class Metric_IpTB:
    name = "IpTB"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 2 + 1
        except ZeroDivisionError:
            handle_error_metric(self, "IpTB zero division")
    desc = """
Instructions per taken branch"""
# Generated info metric (Info.Inst_Mix): branch instructions per taken branch; always reported.
class Metric_BpTkBranch:
    name = "BpTkBranch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = BpTkBranch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "BpTkBranch zero division")
    desc = """
Branch instructions per taken branch. . Can be used to
approximate PGO-likelihood for non-loopy codes."""
# Generated per-instruction metric (Info.Inst_Mix): instructions per FP op; flagged when < 10.
class Metric_IpFLOP:
    name = "IpFLOP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpFLOP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFLOP zero division")
    desc = """
Instructions per Floating Point (FP) Operation (lower number
means higher occurrence rate). Reference: Tuning Performance
via Metrics with Expectations.
https://doi.org/10.1109/LCA.2019.2916408"""
# Generated per-instruction metric (Info.Inst_Mix): instructions per FP
# arithmetic instruction; flagged when < 10.
class Metric_IpArith:
    name = "IpArith"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith zero division")
    desc = """
Instructions per FP Arithmetic instruction (lower number
means higher occurrence rate). Values < 1 are possible due
to intentional FMA double counting. Approximated prior to
BDW."""
# Generated per-instruction metric (Info.Inst_Mix): instructions per scalar
# single-precision FP instruction; flagged when < 10.
class Metric_IpArith_Scalar_SP:
    name = "IpArith_Scalar_SP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_SP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_SP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Single-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
# Generated per-instruction metric (Info.Inst_Mix): instructions per scalar
# double-precision FP instruction; flagged when < 10.
class Metric_IpArith_Scalar_DP:
    name = "IpArith_Scalar_DP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_DP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_DP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Double-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
# Generated per-instruction metric (Info.Inst_Mix): instructions per 128-bit
# AVX/SSE FP instruction; flagged when < 10.
class Metric_IpArith_AVX128:
    name = "IpArith_AVX128"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX128(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX128 zero division")
    desc = """
Instructions per FP Arithmetic AVX/SSE 128-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
# Generated per-instruction metric (Info.Inst_Mix): instructions per 256-bit
# AVX FP instruction; flagged when < 10.
class Metric_IpArith_AVX256:
    name = "IpArith_AVX256"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX256(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX256 zero division")
    desc = """
Instructions per FP Arithmetic AVX* 256-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
# Generated per-instruction metric (Info.Inst_Mix): instructions per 512-bit
# AVX FP instruction; flagged when < 10.
class Metric_IpArith_AVX512:
    name = "IpArith_AVX512"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX512(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX512 zero division")
    desc = """
Instructions per FP Arithmetic AVX 512-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
# Generated per-instruction metric (Info.Inst_Mix): instructions per PAUSE.
# NOTE(review): metricgroup lists Flops/FpVector, which looks copy-pasted from
# the neighboring IpArith_AVX* metrics — a PAUSE metric is not FP-related.
# Confirm against the TMA spreadsheet / generator before changing.
class Metric_IpPause:
    name = "IpPause"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpPause(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IpPause zero division")
    desc = """
Instructions per PAUSE (lower number means higher occurrence
rate)"""
# Generated per-instruction metric (Info.Inst_Mix): instructions per software
# prefetch; flagged when < 100.
class Metric_IpSWPF:
    name = "IpSWPF"
    domain = "Inst_Metric"
    maxval = 1000
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Prefetches'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpSWPF(self, EV, 0)
            self.thresh = (self.val < 100)
        except ZeroDivisionError:
            handle_error_metric(self, "IpSWPF zero division")
    desc = """
Instructions per Software prefetch instruction (of any type:
NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence
rate)"""
# Generated count metric (Info.Inst_Mix): total retired instructions.
class Metric_Instructions:
    name = "Instructions"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Summary', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instructions(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Instructions zero division")
    desc = """
Total number of retired Instructions"""
# Generated info metric (Info.Pipeline): average uops retired per retiring cycle.
class Metric_Retire:
    name = "Retire"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Pipeline', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Retire(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Retire zero division")
    desc = """
Average number of Uops retired in cycles where at least one
uop has retired."""
# Generated per-instruction metric (Info.Pipeline): instructions per microcode
# assist; flagged when < 100000.
class Metric_IpAssist:
    name = "IpAssist"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpAssist(self, EV, 0)
            self.thresh = (self.val < 100000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpAssist zero division")
    desc = """
Instructions per a microcode Assist invocation. See Assists
tree node for details (lower number means higher occurrence
rate)"""
# Generated info metric (Info.Pipeline); desc left empty by the generator.
class Metric_Execute:
    name = "Execute"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute zero division")
    desc = """
"""
# Generated info metric (Info.Pipeline): average uops/cycle delivered by the LSD.
class Metric_Fetch_LSD:
    name = "Fetch_LSD"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_LSD(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_LSD zero division")
    desc = """
Average number of uops fetched from LSD per cycle"""
# Generated info metric (Info.Pipeline): average uops/cycle delivered by the DSB.
class Metric_Fetch_DSB:
    name = "Fetch_DSB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_DSB(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_DSB zero division")
    desc = """
Average number of uops fetched from DSB per cycle"""
# Generated info metric (Info.Pipeline): average uops/cycle delivered by MITE (legacy decode).
class Metric_Fetch_MITE:
    name = "Fetch_MITE"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_MITE(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_MITE zero division")
    desc = """
Average number of uops fetched from MITE per cycle"""
# Generated info metric (Info.Frontend): uops issued per issuing cycle.
class Metric_Fetch_UpC:
    name = "Fetch_UpC"
    domain = "Metric"
    maxval = 6.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_UpC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_UpC zero division")
    desc = """
Average number of Uops issued by front-end when it issued
something"""
# Generated info metric (Info.Frontend): fraction (0..1) of uops delivered by the LSD.
class Metric_LSD_Coverage:
    name = "LSD_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed', 'LSD'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = LSD_Coverage(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "LSD_Coverage zero division")
    desc = """
Fraction of Uops delivered by the LSD (Loop Stream Detector;
aka Loop Cache)"""
# Generated info metric (Info.Frontend): fraction of uops delivered by the DSB;
# flagged only when coverage < 0.7 while the workload also has high IPC
# (HighIPC() helper) — low DSB coverage matters mainly for fast code.
class Metric_DSB_Coverage:
    name = "DSB_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSB', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Coverage(self, EV, 0)
            self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Coverage zero division")
    desc = """
Fraction of Uops delivered by the DSB (aka Decoded ICache;
or Uop Cache). See section 'Decoded ICache' in Optimization
Manual. http://www.intel.com/content/www/us/en/architecture-
and-technology/64-ia-32-architectures-optimization-
manual.html"""
# Generated info metric (Info.Frontend): average cycle cost of a DSB-to-MITE switch.
class Metric_DSB_Switch_Cost:
    name = "DSB_Switch_Cost"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSBmiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Switch_Cost(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Switch_Cost zero division")
    desc = """
Average number of cycles of a switch from the DSB fetch-unit
to MITE fetch unit - see DSB_Switches tree node for details."""
# Generated bottleneck-cost metric (Info.Botlnk.L2, scaled-slots domain):
# pipeline cost of DSB misses; flagged when > 10.
class Metric_DSB_Misses:
    name = "DSB_Misses"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Botlnk.L2"
    metricgroup = frozenset(['DSBmiss', 'Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Misses(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Misses zero division")
    desc = """
Total pipeline cost of DSB (uop cache) misses - subset of
the Instruction_Fetch_BW Bottleneck."""
# Generated bottleneck-cost metric (Info.Botlnk.L2): pipeline cost of DSB hits;
# flagged when > 10.
class Metric_DSB_Bandwidth:
    name = "DSB_Bandwidth"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Botlnk.L2"
    metricgroup = frozenset(['DSB', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Bandwidth(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Bandwidth zero division")
    desc = """
Total pipeline cost of DSB (uop cache) hits - subset of the
Instruction_Fetch_BW Bottleneck."""
# Generated info metric (Info.Frontend): average L1I miss latency.
class Metric_ICache_Miss_Latency:
    name = "ICache_Miss_Latency"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = ICache_Miss_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "ICache_Miss_Latency zero division")
    desc = """
Average Latency for L1 instruction cache misses"""
# Generated bottleneck-cost metric (Info.Botlnk.L2): pipeline cost of I-cache
# misses; flagged when > 5.
class Metric_IC_Misses:
    name = "IC_Misses"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Botlnk.L2"
    metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IC_Misses(self, EV, 0)
            self.thresh = (self.val > 5)
        except ZeroDivisionError:
            handle_error_metric(self, "IC_Misses zero division")
    desc = """
Total pipeline cost of Instruction Cache misses - subset of
the Big_Code Bottleneck."""
# Generated per-instruction metric (Info.Frontend): instructions per retired
# DSB miss; flagged when < 50.
class Metric_IpDSB_Miss_Ret:
    name = "IpDSB_Miss_Ret"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSBmiss', 'Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpDSB_Miss_Ret(self, EV, 0)
            self.thresh = (self.val < 50)
        except ZeroDivisionError:
            handle_error_metric(self, "IpDSB_Miss_Ret zero division")
    desc = """
Instructions per non-speculative DSB miss (lower number
means higher occurrence rate)"""
# Generated info metric (Info.Frontend): instructions per BAClear (unknown-branch clear).
class Metric_IpUnknown_Branch:
    name = "IpUnknown_Branch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpUnknown_Branch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IpUnknown_Branch zero division")
    desc = """
Instructions per speculative Unknown Branch Misprediction
(BAClear) (lower number means higher occurrence rate)"""
# Generated info metric (Info.Frontend): L2 code-line true misses per kilo instruction.
class Metric_L2MPKI_Code:
    name = "L2MPKI_Code"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['IcMiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_Code(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_Code zero division")
    desc = """
L2 cache true code cacheline misses per kilo instruction"""
# Generated info metric (Info.Frontend): L2 code-line misses per kilo
# instruction including speculative accesses.
class Metric_L2MPKI_Code_All:
    name = "L2MPKI_Code_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['IcMiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_Code_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_Code_All zero division")
    desc = """
L2 cache speculative code cacheline misses per kilo
instruction"""
# Generated per-instruction metric (Info.Bad_Spec): instructions per retired
# branch misprediction; flagged when < 200.
class Metric_IpMispredict:
    name = "IpMispredict"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMispredict(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMispredict zero division")
    desc = """
Number of Instructions per non-speculative Branch
Misprediction (JEClear) (lower number means higher
occurrence rate)"""
# Generated per-instruction metric (Info.Bad_Spec): instructions per mispredicted
# non-taken conditional branch; flagged when < 200.
class Metric_IpMisp_Cond_Ntaken:
    name = "IpMisp_Cond_Ntaken"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMisp_Cond_Ntaken(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMisp_Cond_Ntaken zero division")
    desc = """
Instructions per retired Mispredicts for conditional non-
taken branches (lower number means higher occurrence rate)."""
# Generated per-instruction metric (Info.Bad_Spec): instructions per mispredicted
# taken conditional branch; flagged when < 200.
class Metric_IpMisp_Cond_Taken:
    name = "IpMisp_Cond_Taken"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMisp_Cond_Taken(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMisp_Cond_Taken zero division")
    desc = """
Instructions per retired Mispredicts for conditional taken
branches (lower number means higher occurrence rate)."""
# Generated per-instruction metric (Info.Bad_Spec): instructions per mispredicted
# return; flagged when < 500.
class Metric_IpMisp_Ret:
    name = "IpMisp_Ret"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMisp_Ret(self, EV, 0)
            self.thresh = (self.val < 500)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMisp_Ret zero division")
    desc = """
Instructions per retired Mispredicts for return branches
(lower number means higher occurrence rate)."""
# Generated per-instruction metric (Info.Bad_Spec): instructions per mispredicted
# indirect CALL/JMP; flagged when < 1000.
class Metric_IpMisp_Indirect:
    name = "IpMisp_Indirect"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMisp_Indirect(self, EV, 0)
            self.thresh = (self.val < 1000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMisp_Indirect zero division")
    desc = """
Instructions per retired Mispredicts for indirect CALL or
JMP branches (lower number means higher occurrence rate)."""
# Generated core-level metric (Info.Bad_Spec): TMA-slot cost per retired branch misprediction.
class Metric_Branch_Misprediction_Cost:
    name = "Branch_Misprediction_Cost"
    domain = "Core_Metric"
    maxval = 300
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Branch_Misprediction_Cost(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Branch_Misprediction_Cost zero division")
    desc = """
Branch Misprediction Cost: Fraction of TMA slots wasted per
non-speculative branch misprediction (retired JEClear)"""
# Generated info metric (Info.Bad_Spec): speculative-to-retired ratio of all pipeline clears.
class Metric_Spec_Clears_Ratio:
    name = "Spec_Clears_Ratio"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Spec_Clears_Ratio(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Spec_Clears_Ratio zero division")
    desc = """
Speculative to Retired ratio of all clears (covering
Mispredicts and nukes)"""
# Generated fraction metric (Info.Branches): share of branches that are non-taken conditionals.
class Metric_Cond_NT:
    name = "Cond_NT"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cond_NT(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Cond_NT zero division")
    desc = """
Fraction of branches that are non-taken conditionals"""
# Generated fraction metric (Info.Branches): share of branches that are taken conditionals.
class Metric_Cond_TK:
    name = "Cond_TK"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cond_TK(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Cond_TK zero division")
    desc = """
Fraction of branches that are taken conditionals"""
# Generated fraction metric (Info.Branches): share of branches that are CALL/RET.
class Metric_CallRet:
    name = "CallRet"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CallRet(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CallRet zero division")
    desc = """
Fraction of branches that are CALL or RET"""
# Generated fraction metric (Info.Branches): share of branches that are unconditional jumps.
class Metric_Jump:
    name = "Jump"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Jump(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Jump zero division")
    desc = """
Fraction of branches that are unconditional (direct or
indirect) jumps"""
# Generated fraction metric (Info.Branches): remainder of branch types not
# covered by the sibling Cond_NT/Cond_TK/CallRet/Jump metrics.
class Metric_Other_Branches:
    name = "Other_Branches"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Other_Branches(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Other_Branches zero division")
    desc = """
Fraction of branches of other types (not individually
covered by other metrics in Info.Branches group)"""
# Generated latency metric (Info.Memory): average L1D-miss demand-load latency in core cycles.
class Metric_Load_Miss_Real_Latency:
    name = "Load_Miss_Real_Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_Miss_Real_Latency zero division")
    desc = """
Actual Average Latency for L1 data-cache miss demand load
operations (in core cycles)"""
# Generated info metric (Info.Memory): memory-level parallelism per logical processor.
class Metric_MLP:
    name = "MLP"
    domain = "Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = MLP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MLP zero division")
    desc = """
Memory-Level-Parallelism (average number of L1 miss demand
load when there is at least one such miss. Per-Logical
Processor)"""
# Generated info metric (Info.Memory): L1D true misses per kilo instruction (retired demand loads).
class Metric_L1MPKI:
    name = "L1MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1MPKI zero division")
    desc = """
L1 cache true misses per kilo instruction for retired demand
loads"""
# Generated info metric (Info.Memory): L1D misses per kilo instruction for all
# demand loads, speculative included.
class Metric_L1MPKI_Load:
    name = "L1MPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1MPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1MPKI_Load zero division")
    desc = """
L1 cache true misses per kilo instruction for all demand
loads (including speculative)"""
# Generated info metric (Info.Memory): L2 true misses per kilo instruction (retired demand loads).
class Metric_L2MPKI:
    name = "L2MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'Backend', 'CacheHits'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI zero division")
    desc = """
L2 cache true misses per kilo instruction for retired demand
loads"""
# Generated info metric (Info.Memory): L2 misses per kilo instruction for all
# request types, speculative included.
class Metric_L2MPKI_All:
    name = "L2MPKI_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_All zero division")
    desc = """
L2 cache misses per kilo instruction for all request types
(including speculative)"""
# Generated info metric (Info.Memory): L2 misses per kilo instruction for all
# demand loads, speculative included.
class Metric_L2MPKI_Load:
    name = "L2MPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_Load zero division")
    desc = """
L2 cache misses per kilo instruction for all demand loads
(including speculative)"""
# Generated info metric (Info.Memory): L2-miss offcore requests per kilo
# instruction for demand RFOs (stores).
class Metric_L2MPKI_RFO:
    name = "L2MPKI_RFO"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheMisses', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_RFO(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_RFO zero division")
    desc = """
Offcore requests (L2 cache miss) per kilo instruction for
demand RFOs"""
# Generated info metric (Info.Memory): L2 hits per kilo instruction for all
# demand loads, speculative included.
class Metric_L2HPKI_Load:
    name = "L2HPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2HPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2HPKI_Load zero division")
    desc = """
L2 cache hits per kilo instruction for all demand loads
(including speculative)"""
# Generated info metric (Info.Memory): L3 true misses per kilo instruction (retired demand loads).
class Metric_L3MPKI:
    name = "L3MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3MPKI zero division")
    desc = """
L3 cache true misses per kilo instruction for retired demand
loads"""
# Generated info metric (Info.Memory): fill-buffer hits per kilo instruction.
class Metric_FB_HPKI:
    name = "FB_HPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FB_HPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FB_HPKI zero division")
    desc = """
Fill Buffer (FB) hits per kilo instructions for retired
demand loads (L1D misses that merge into ongoing miss-
handling entries)"""
# Generated info metric (Info.Memory): L1D fill bandwidth; desc left empty by the generator.
class Metric_L1D_Cache_Fill_BW:
    name = "L1D_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1D_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1D_Cache_Fill_BW zero division")
    desc = """
"""
# Generated info metric (Info.Memory): L2 fill bandwidth; desc left empty by the generator.
class Metric_L2_Cache_Fill_BW:
    name = "L2_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2_Cache_Fill_BW zero division")
    desc = """
"""
# Generated info metric (Info.Memory): L3 fill bandwidth; desc left empty by the generator.
class Metric_L3_Cache_Fill_BW:
    name = "L3_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Fill_BW zero division")
    desc = """
"""
# Generated info metric (Info.Memory): L3 access bandwidth; desc left empty by the generator.
class Metric_L3_Cache_Access_BW:
    name = "L3_Cache_Access_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3_Cache_Access_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Access_BW zero division")
    desc = """
"""
# Generated core-level metric (Info.Memory.TLB): page-walker utilization;
# flagged when > 0.5 (walkers busy more than half the time).
class Metric_Page_Walks_Utilization:
    name = "Page_Walks_Utilization"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Memory.TLB"
    metricgroup = frozenset(['Mem', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Page_Walks_Utilization(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Page_Walks_Utilization zero division")
    desc = """
Utilization of the core's Page Walker(s) serving STLB misses
triggered by instruction/Load/Store accesses"""
class Metric_Code_STLB_MPKI:
    """Generated metric node: code STLB misses per kilo instruction (formula lives in Code_STLB_MPKI)."""
    name = "Code_STLB_MPKI"
    area = "Info.Memory.TLB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Fed', 'MemoryTLB'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Code_STLB_MPKI(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Code_STLB_MPKI zero division")
        else:
            self.thresh = True
    desc = """
STLB (2nd level TLB) code speculative misses per kilo
instruction (misses of any page-size that complete the page
walk)"""
class Metric_Load_STLB_MPKI:
    """Generated metric node: load STLB misses per kilo instruction (formula lives in Load_STLB_MPKI)."""
    name = "Load_STLB_MPKI"
    area = "Info.Memory.TLB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Mem', 'MemoryTLB'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Load_STLB_MPKI(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Load_STLB_MPKI zero division")
        else:
            self.thresh = True
    desc = """
STLB (2nd level TLB) data load speculative misses per kilo
instruction (misses of any page-size that complete the page
walk)"""
class Metric_Store_STLB_MPKI:
    """Generated metric node: store STLB misses per kilo instruction (formula lives in Store_STLB_MPKI)."""
    name = "Store_STLB_MPKI"
    area = "Info.Memory.TLB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Mem', 'MemoryTLB'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Store_STLB_MPKI(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Store_STLB_MPKI zero division")
        else:
            self.thresh = True
    desc = """
STLB (2nd level TLB) data store speculative misses per kilo
instruction (misses of any page-size that complete the page
walk)"""
class Metric_L1D_Cache_Fill_BW_2T:
    """Generated metric node: per-core L1D fill bandwidth (formula lives in L1D_Cache_Fill_BW_2T)."""
    name = "L1D_Cache_Fill_BW_2T"
    area = "Info.Memory.Core"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Mem', 'MemoryBW'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = L1D_Cache_Fill_BW_2T(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division")
        else:
            self.thresh = True
    desc = """
Average per-core data fill bandwidth to the L1 data cache
[GB / sec]"""
class Metric_L2_Cache_Fill_BW_2T:
    """Generated metric node: per-core L2 fill bandwidth (formula lives in L2_Cache_Fill_BW_2T)."""
    name = "L2_Cache_Fill_BW_2T"
    area = "Info.Memory.Core"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Mem', 'MemoryBW'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = L2_Cache_Fill_BW_2T(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division")
        else:
            self.thresh = True
    desc = """
Average per-core data fill bandwidth to the L2 cache [GB /
sec]"""
class Metric_L3_Cache_Fill_BW_2T:
    """Generated metric node: per-core L3 fill bandwidth (formula lives in L3_Cache_Fill_BW_2T)."""
    name = "L3_Cache_Fill_BW_2T"
    area = "Info.Memory.Core"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Mem', 'MemoryBW'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = L3_Cache_Fill_BW_2T(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division")
        else:
            self.thresh = True
    desc = """
Average per-core data fill bandwidth to the L3 cache [GB /
sec]"""
class Metric_L3_Cache_Access_BW_2T:
    """Generated metric node: per-core L3 access bandwidth (formula lives in L3_Cache_Access_BW_2T)."""
    name = "L3_Cache_Access_BW_2T"
    area = "Info.Memory.Core"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Mem', 'MemoryBW', 'Offcore'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = L3_Cache_Access_BW_2T(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Access_BW_2T zero division")
        else:
            self.thresh = True
    desc = """
Average per-core data access bandwidth to the L3 cache [GB /
sec]"""
class Metric_Load_L2_Miss_Latency:
    """Generated metric node: L2-miss demand-load latency (formula lives in Load_L2_Miss_Latency)."""
    name = "Load_L2_Miss_Latency"
    area = "Info.Memory.Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    metricgroup = frozenset({'Memory_Lat', 'Offcore'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Load_L2_Miss_Latency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Load_L2_Miss_Latency zero division")
        else:
            self.thresh = True
    desc = """
Average Latency for L2 cache miss demand Loads"""
class Metric_Load_L3_Miss_Latency:
    """Generated metric node: L3-miss demand-load latency (formula lives in Load_L3_Miss_Latency)."""
    name = "Load_L3_Miss_Latency"
    area = "Info.Memory.Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    metricgroup = frozenset({'Memory_Lat', 'Offcore'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Load_L3_Miss_Latency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Load_L3_Miss_Latency zero division")
        else:
            self.thresh = True
    desc = """
Average Latency for L3 cache miss demand Loads"""
class Metric_Load_L2_MLP:
    """Generated metric node: parallel L2-miss demand loads (formula lives in Load_L2_MLP)."""
    name = "Load_L2_MLP"
    area = "Info.Memory.Latency"
    domain = "Metric"
    maxval = 100
    errcount = 0
    metricgroup = frozenset({'Memory_BW', 'Offcore'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Load_L2_MLP(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Load_L2_MLP zero division")
        else:
            self.thresh = True
    desc = """
Average Parallel L2 cache miss demand Loads"""
class Metric_Data_L2_MLP:
    """Generated metric node: parallel L2-miss data reads (formula lives in Data_L2_MLP)."""
    name = "Data_L2_MLP"
    area = "Info.Memory.Latency"
    domain = "Metric"
    maxval = 100
    errcount = 0
    metricgroup = frozenset({'Memory_BW', 'Offcore'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Data_L2_MLP(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Data_L2_MLP zero division")
        else:
            self.thresh = True
    desc = """
Average Parallel L2 cache miss data reads"""
class Metric_UC_Load_PKI:
    """Generated metric node: uncacheable loads per kilo instruction (formula lives in UC_Load_PKI)."""
    name = "UC_Load_PKI"
    area = "Info.Memory.Mix"
    domain = "Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Mem'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = UC_Load_PKI(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "UC_Load_PKI zero division")
        else:
            self.thresh = True
    desc = """
Un-cacheable retired load per kilo instruction"""
class Metric_Bus_Lock_PKI:
    """Generated metric node: bus locks per kilo instruction (formula lives in Bus_Lock_PKI)."""
    name = "Bus_Lock_PKI"
    area = "Info.Memory.Mix"
    domain = "Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Mem'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Bus_Lock_PKI(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Bus_Lock_PKI zero division")
        else:
            self.thresh = True
    desc = """
\"Bus lock\" per kilo instruction"""
class Metric_CPU_Utilization:
    """Generated metric node: average CPU utilization (formula lives in CPU_Utilization)."""
    name = "CPU_Utilization"
    area = "Info.System"
    domain = "Metric"
    maxval = 1
    errcount = 0
    metricgroup = frozenset({'HPC', 'Summary'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = CPU_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "CPU_Utilization zero division")
        else:
            self.thresh = True
    desc = """
Average CPU Utilization (percentage)"""
class Metric_CPUs_Utilized:
    """Generated metric node: average number of utilized CPUs (formula lives in CPUs_Utilized)."""
    name = "CPUs_Utilized"
    area = "Info.System"
    domain = "Metric"
    maxval = 300
    errcount = 0
    metricgroup = frozenset({'Summary'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = CPUs_Utilized(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "CPUs_Utilized zero division")
        else:
            self.thresh = True
    desc = """
Average number of utilized CPUs"""
class Metric_Core_Frequency:
    """Generated metric node: measured average core frequency (formula lives in Core_Frequency)."""
    name = "Core_Frequency"
    area = "Info.System"
    domain = "SystemMetric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Summary', 'Power'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Core_Frequency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Frequency zero division")
        else:
            self.thresh = True
    desc = """
Measured Average Core Frequency for unhalted processors
[GHz]"""
class Metric_GFLOPs:
    """Generated metric node: giga floating-point ops per second (formula lives in GFLOPs)."""
    name = "GFLOPs"
    area = "Info.System"
    domain = "Metric"
    maxval = 200
    errcount = 0
    metricgroup = frozenset({'Cor', 'Flops', 'HPC'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = GFLOPs(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "GFLOPs zero division")
        else:
            self.thresh = True
    desc = """
Giga Floating Point Operations Per Second. Aggregate across
all supported options of: FP precisions, scalar and vector
instructions, vector-width"""
class Metric_Turbo_Utilization:
    """Generated metric node: frequency relative to nominal (formula lives in Turbo_Utilization)."""
    name = "Turbo_Utilization"
    area = "Info.System"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    metricgroup = frozenset({'Power'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Turbo_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Turbo_Utilization zero division")
        else:
            self.thresh = True
    desc = """
Average Frequency Utilization relative nominal frequency"""
class Metric_Power_License0_Utilization:
    """Generated metric node: license-level-0 cycle fraction (formula lives in Power_License0_Utilization)."""
    name = "Power_License0_Utilization"
    area = "Info.System"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    metricgroup = frozenset({'Power'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Power_License0_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Power_License0_Utilization zero division")
        else:
            self.thresh = True
    desc = """
Fraction of Core cycles where the core was running with
power-delivery for baseline license level 0. This includes
non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit
codes."""
class Metric_Power_License1_Utilization:
    """Generated metric node: license-level-1 cycle fraction (formula lives in Power_License1_Utilization)."""
    name = "Power_License1_Utilization"
    area = "Info.System"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    metricgroup = frozenset({'Power'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Power_License1_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Power_License1_Utilization zero division")
        else:
            self.thresh = (self.val > 0.5)
    desc = """
Fraction of Core cycles where the core was running with
power-delivery for license level 1. This includes high
current AVX 256-bit instructions as well as low current AVX
512-bit instructions."""
class Metric_Power_License2_Utilization:
    """Generated metric node: license-level-2 cycle fraction (formula lives in Power_License2_Utilization)."""
    name = "Power_License2_Utilization"
    area = "Info.System"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    metricgroup = frozenset({'Power'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Power_License2_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Power_License2_Utilization zero division")
        else:
            self.thresh = (self.val > 0.5)
    desc = """
Fraction of Core cycles where the core was running with
power-delivery for license level 2 (introduced in SKX).
This includes high current AVX 512-bit instructions."""
class Metric_SMT_2T_Utilization:
    """Generated metric node: both-SMT-threads-active cycle fraction (formula lives in SMT_2T_Utilization)."""
    name = "SMT_2T_Utilization"
    area = "Info.System"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    metricgroup = frozenset({'SMT'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = SMT_2T_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "SMT_2T_Utilization zero division")
        else:
            self.thresh = True
    desc = """
Fraction of cycles where both hardware Logical Processors
were active"""
class Metric_Kernel_Utilization:
    """Generated metric node: kernel-mode cycle fraction (formula lives in Kernel_Utilization)."""
    name = "Kernel_Utilization"
    area = "Info.System"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    metricgroup = frozenset({'OS'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Kernel_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_Utilization zero division")
        else:
            self.thresh = (self.val > 0.05)
    desc = """
Fraction of cycles spent in the Operating System (OS) Kernel
mode"""
class Metric_Kernel_CPI:
    """Generated metric node: kernel-mode cycles per instruction (formula lives in Kernel_CPI)."""
    name = "Kernel_CPI"
    area = "Info.System"
    domain = "Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'OS'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Kernel_CPI(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_CPI zero division")
        else:
            self.thresh = True
    desc = """
Cycles Per Instruction for the Operating System (OS) Kernel
mode"""
class Metric_DRAM_BW_Use:
    """Generated metric node: external memory bandwidth use (formula lives in DRAM_BW_Use)."""
    name = "DRAM_BW_Use"
    area = "Info.System"
    domain = "GB/sec"
    maxval = 200
    errcount = 0
    metricgroup = frozenset({'HPC', 'MemOffcore', 'MemoryBW', 'SoC'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = DRAM_BW_Use(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "DRAM_BW_Use zero division")
        else:
            self.thresh = True
    desc = """
Average external Memory Bandwidth Use for reads and writes
[GB / sec]"""
class Metric_Power:
    """Generated metric node: total package power in Watts (formula lives in Power)."""
    name = "Power"
    area = "Info.System"
    domain = "SystemMetric"
    maxval = 200
    errcount = 0
    metricgroup = frozenset({'Power', 'SoC'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Power(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Power zero division")
        else:
            self.thresh = True
    desc = """
Total package Power in Watts"""
class Metric_Time:
    """Generated metric node: run duration in seconds (formula lives in Time)."""
    name = "Time"
    area = "Info.System"
    domain = "Seconds"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Summary'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Time(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Time zero division")
        else:
            self.thresh = (self.val < 1)
    desc = """
Run duration time in seconds"""
class Metric_Socket_CLKS:
    """Generated metric node: socket clocks while any core active (formula lives in Socket_CLKS)."""
    name = "Socket_CLKS"
    area = "Info.System"
    domain = "Count"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'SoC'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = Socket_CLKS(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Socket_CLKS zero division")
        else:
            self.thresh = True
    desc = """
Socket actual clocks when any core is active on that socket"""
class Metric_IpFarBranch:
    """Generated metric node: instructions per far branch (formula lives in IpFarBranch)."""
    name = "IpFarBranch"
    area = "Info.System"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    metricgroup = frozenset({'Branches', 'OS'})
    sibling = None

    def compute(self, EV):
        # Evaluate the formula; a divide-by-zero is reported, not raised.
        try:
            self.val = IpFarBranch(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFarBranch zero division")
        else:
            self.thresh = (self.val < 1000000)
    desc = """
Instructions per Far Branch ( Far Branches apply upon
transition from application to operating system, handling
interrupts, exceptions) [lower number means higher
occurrence rate]"""
# Schedule
class Setup:
def __init__(self, r):
o = dict()
n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n
n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n
n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n
n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n
n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n
n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n
n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n
n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n
n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n
n = LCP() ; r.run(n) ; o["LCP"] = n
n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n
n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n
n = MITE() ; r.run(n) ; o["MITE"] = n
n = Decoder0_Alone() ; r.run(n) ; o["Decoder0_Alone"] = n
n = MITE_4wide() ; r.run(n) ; o["MITE_4wide"] = n
n = DSB() ; r.run(n) ; o["DSB"] = n
n = LSD() ; r.run(n) ; o["LSD"] = n
n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n
n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n
n = Other_Mispredicts() ; r.run(n) ; o["Other_Mispredicts"] = n
n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n
n = Other_Nukes() ; r.run(n) ; o["Other_Nukes"] = n
n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n
n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n
n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n
n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n
n = Load_STLB_Hit() ; r.run(n) ; o["Load_STLB_Hit"] = n
n = Load_STLB_Miss() ; r.run(n) ; o["Load_STLB_Miss"] = n
n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n
n = L1_Hit_Latency() ; r.run(n) ; o["L1_Hit_Latency"] = n
n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n
n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n
n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n
n = FB_Full() ; r.run(n) ; o["FB_Full"] = n
n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n
n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n
n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n
n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n
n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n
n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n
n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n
n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n
n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n
n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n
n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n
n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n
n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n
n = Streaming_Stores() ; r.run(n) ; o["Streaming_Stores"] = n
n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n
n = Store_STLB_Hit() ; r.run(n) ; o["Store_STLB_Hit"] = n
n = Store_STLB_Miss() ; r.run(n) ; o["Store_STLB_Miss"] = n
n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n
n = Divider() ; r.run(n) ; o["Divider"] = n
n = Serializing_Operation() ; r.run(n) ; o["Serializing_Operation"] = n
n = Slow_Pause() ; r.run(n) ; o["Slow_Pause"] = n
n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n
n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n
n = Mixing_Vectors() ; r.run(n) ; o["Mixing_Vectors"] = n
n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n
n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n
n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n
n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n
n = Port_0() ; r.run(n) ; o["Port_0"] = n
n = Port_1() ; r.run(n) ; o["Port_1"] = n
n = Port_5() ; r.run(n) ; o["Port_5"] = n
n = Port_6() ; r.run(n) ; o["Port_6"] = n
n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n
n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n
n = Retiring() ; r.run(n) ; o["Retiring"] = n
n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n
n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n
n = X87_Use() ; r.run(n) ; o["X87_Use"] = n
n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n
n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n
n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n
n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n
n = FP_Vector_512b() ; r.run(n) ; o["FP_Vector_512b"] = n
n = Memory_Operations() ; r.run(n) ; o["Memory_Operations"] = n
n = Branch_Instructions() ; r.run(n) ; o["Branch_Instructions"] = n
n = Other_Light_Ops() ; r.run(n) ; o["Other_Light_Ops"] = n
n = Nop_Instructions() ; r.run(n) ; o["Nop_Instructions"] = n
n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n
n = Few_Uops_Instructions() ; r.run(n) ; o["Few_Uops_Instructions"] = n
n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n
n = Assists() ; r.run(n) ; o["Assists"] = n
n = FP_Assists() ; r.run(n) ; o["FP_Assists"] = n
n = CISC() ; r.run(n) ; o["CISC"] = n
# parents
o["Fetch_Latency"].parent = o["Frontend_Bound"]
o["ICache_Misses"].parent = o["Fetch_Latency"]
o["ITLB_Misses"].parent = o["Fetch_Latency"]
o["Branch_Resteers"].parent = o["Fetch_Latency"]
o["Mispredicts_Resteers"].parent = o["Branch_Resteers"]
o["Clears_Resteers"].parent = o["Branch_Resteers"]
o["Unknown_Branches"].parent = o["Branch_Resteers"]
o["MS_Switches"].parent = o["Fetch_Latency"]
o["LCP"].parent = o["Fetch_Latency"]
o["DSB_Switches"].parent = o["Fetch_Latency"]
o["Fetch_Bandwidth"].parent = o["Frontend_Bound"]
o["MITE"].parent = o["Fetch_Bandwidth"]
o["Decoder0_Alone"].parent = o["MITE"]
o["MITE_4wide"].parent = o["MITE"]
o["DSB"].parent = o["Fetch_Bandwidth"]
o["LSD"].parent = o["Fetch_Bandwidth"]
o["Branch_Mispredicts"].parent = o["Bad_Speculation"]
o["Other_Mispredicts"].parent = o["Branch_Mispredicts"]
o["Machine_Clears"].parent = o["Bad_Speculation"]
o["Other_Nukes"].parent = o["Machine_Clears"]
o["Memory_Bound"].parent = o["Backend_Bound"]
o["L1_Bound"].parent = o["Memory_Bound"]
o["DTLB_Load"].parent = o["L1_Bound"]
o["Load_STLB_Hit"].parent = o["DTLB_Load"]
o["Load_STLB_Miss"].parent = o["DTLB_Load"]
o["Store_Fwd_Blk"].parent = o["L1_Bound"]
o["L1_Hit_Latency"].parent = o["L1_Bound"]
o["Lock_Latency"].parent = o["L1_Bound"]
o["Split_Loads"].parent = o["L1_Bound"]
o["G4K_Aliasing"].parent = o["L1_Bound"]
o["FB_Full"].parent = o["L1_Bound"]
o["L2_Bound"].parent = o["Memory_Bound"]
o["L3_Bound"].parent = o["Memory_Bound"]
o["Contested_Accesses"].parent = o["L3_Bound"]
o["Data_Sharing"].parent = o["L3_Bound"]
o["L3_Hit_Latency"].parent = o["L3_Bound"]
o["SQ_Full"].parent = o["L3_Bound"]
o["DRAM_Bound"].parent = o["Memory_Bound"]
o["MEM_Bandwidth"].parent = o["DRAM_Bound"]
o["MEM_Latency"].parent = o["DRAM_Bound"]
o["Store_Bound"].parent = o["Memory_Bound"]
o["Store_Latency"].parent = o["Store_Bound"]
o["False_Sharing"].parent = o["Store_Bound"]
o["Split_Stores"].parent = o["Store_Bound"]
o["Streaming_Stores"].parent = o["Store_Bound"]
o["DTLB_Store"].parent = o["Store_Bound"]
o["Store_STLB_Hit"].parent = o["DTLB_Store"]
o["Store_STLB_Miss"].parent = o["DTLB_Store"]
o["Core_Bound"].parent = o["Backend_Bound"]
o["Divider"].parent = o["Core_Bound"]
o["Serializing_Operation"].parent = o["Core_Bound"]
o["Slow_Pause"].parent = o["Serializing_Operation"]
o["Ports_Utilization"].parent = o["Core_Bound"]
o["Ports_Utilized_0"].parent = o["Ports_Utilization"]
o["Mixing_Vectors"].parent = o["Ports_Utilized_0"]
o["Ports_Utilized_1"].parent = o["Ports_Utilization"]
o["Ports_Utilized_2"].parent = o["Ports_Utilization"]
o["Ports_Utilized_3m"].parent = o["Ports_Utilization"]
o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Port_0"].parent = o["ALU_Op_Utilization"]
o["Port_1"].parent = o["ALU_Op_Utilization"]
o["Port_5"].parent = o["ALU_Op_Utilization"]
o["Port_6"].parent = o["ALU_Op_Utilization"]
o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Light_Operations"].parent = o["Retiring"]
o["FP_Arith"].parent = o["Light_Operations"]
o["X87_Use"].parent = o["FP_Arith"]
o["FP_Scalar"].parent = o["FP_Arith"]
o["FP_Vector"].parent = o["FP_Arith"]
o["FP_Vector_128b"].parent = o["FP_Vector"]
o["FP_Vector_256b"].parent = o["FP_Vector"]
o["FP_Vector_512b"].parent = o["FP_Vector"]
o["Memory_Operations"].parent = o["Light_Operations"]
o["Branch_Instructions"].parent = o["Light_Operations"]
o["Other_Light_Ops"].parent = o["Light_Operations"]
o["Nop_Instructions"].parent = o["Other_Light_Ops"]
o["Heavy_Operations"].parent = o["Retiring"]
o["Few_Uops_Instructions"].parent = o["Heavy_Operations"]
o["Microcode_Sequencer"].parent = o["Heavy_Operations"]
o["Assists"].parent = o["Microcode_Sequencer"]
o["FP_Assists"].parent = o["Assists"]
o["CISC"].parent = o["Microcode_Sequencer"]
# user visible metrics
n = Metric_Mispredictions() ; r.metric(n) ; o["Mispredictions"] = n
n = Metric_Big_Code() ; r.metric(n) ; o["Big_Code"] = n
n = Metric_Instruction_Fetch_BW() ; r.metric(n) ; o["Instruction_Fetch_BW"] = n
n = Metric_Cache_Memory_Bandwidth() ; r.metric(n) ; o["Cache_Memory_Bandwidth"] = n
n = Metric_Cache_Memory_Latency() ; r.metric(n) ; o["Cache_Memory_Latency"] = n
n = Metric_Memory_Data_TLBs() ; r.metric(n) ; o["Memory_Data_TLBs"] = n
n = Metric_Memory_Synchronization() ; r.metric(n) ; o["Memory_Synchronization"] = n
n = Metric_Compute_Bound_Est() ; r.metric(n) ; o["Compute_Bound_Est"] = n
n = Metric_Irregular_Overhead() ; r.metric(n) ; o["Irregular_Overhead"] = n
n = Metric_Other_Bottlenecks() ; r.metric(n) ; o["Other_Bottlenecks"] = n
n = Metric_Branching_Overhead() ; r.metric(n) ; o["Branching_Overhead"] = n
n = Metric_Useful_Work() ; r.metric(n) ; o["Useful_Work"] = n
n = Metric_Core_Bound_Likely() ; r.metric(n) ; o["Core_Bound_Likely"] = n
n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n
n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n
n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n
n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n
n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n
n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n
n = Metric_Slots_Utilization() ; r.metric(n) ; o["Slots_Utilization"] = n
n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n
n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n
n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n
n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n
n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n
n = Metric_EPC() ; r.metric(n) ; o["EPC"] = n
n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n
n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n
n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n
n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n
n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n
n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n
n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n
n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n
n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n
n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n
n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n
n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n
n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n
n = Metric_IpArith_AVX512() ; r.metric(n) ; o["IpArith_AVX512"] = n
n = Metric_IpPause() ; r.metric(n) ; o["IpPause"] = n
n = Metric_IpSWPF() ; r.metric(n) ; o["IpSWPF"] = n
n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n
n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n
n = Metric_IpAssist() ; r.metric(n) ; o["IpAssist"] = n
n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n
n = Metric_Fetch_LSD() ; r.metric(n) ; o["Fetch_LSD"] = n
n = Metric_Fetch_DSB() ; r.metric(n) ; o["Fetch_DSB"] = n
n = Metric_Fetch_MITE() ; r.metric(n) ; o["Fetch_MITE"] = n
n = Metric_Fetch_UpC() ; r.metric(n) ; o["Fetch_UpC"] = n
n = Metric_LSD_Coverage() ; r.metric(n) ; o["LSD_Coverage"] = n
n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n
n = Metric_DSB_Switch_Cost() ; r.metric(n) ; o["DSB_Switch_Cost"] = n
n = Metric_DSB_Misses() ; r.metric(n) ; o["DSB_Misses"] = n
n = Metric_DSB_Bandwidth() ; r.metric(n) ; o["DSB_Bandwidth"] = n
n = Metric_ICache_Miss_Latency() ; r.metric(n) ; o["ICache_Miss_Latency"] = n
n = Metric_IC_Misses() ; r.metric(n) ; o["IC_Misses"] = n
n = Metric_IpDSB_Miss_Ret() ; r.metric(n) ; o["IpDSB_Miss_Ret"] = n
n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n
n = Metric_L2MPKI_Code() ; r.metric(n) ; o["L2MPKI_Code"] = n
n = Metric_L2MPKI_Code_All() ; r.metric(n) ; o["L2MPKI_Code_All"] = n
n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n
n = Metric_IpMisp_Cond_Ntaken() ; r.metric(n) ; o["IpMisp_Cond_Ntaken"] = n
n = Metric_IpMisp_Cond_Taken() ; r.metric(n) ; o["IpMisp_Cond_Taken"] = n
n = Metric_IpMisp_Ret() ; r.metric(n) ; o["IpMisp_Ret"] = n
n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n
n = Metric_Branch_Misprediction_Cost() ; r.metric(n) ; o["Branch_Misprediction_Cost"] = n
n = Metric_Spec_Clears_Ratio() ; r.metric(n) ; o["Spec_Clears_Ratio"] = n
n = Metric_Cond_NT() ; r.metric(n) ; o["Cond_NT"] = n
n = Metric_Cond_TK() ; r.metric(n) ; o["Cond_TK"] = n
n = Metric_CallRet() ; r.metric(n) ; o["CallRet"] = n
n = Metric_Jump() ; r.metric(n) ; o["Jump"] = n
n = Metric_Other_Branches() ; r.metric(n) ; o["Other_Branches"] = n
n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n
n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n
n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n
n = Metric_L1MPKI_Load() ; r.metric(n) ; o["L1MPKI_Load"] = n
n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n
n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n
n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n
n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n
n = Metric_L2HPKI_Load() ; r.metric(n) ; o["L2HPKI_Load"] = n
n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n
n = Metric_FB_HPKI() ; r.metric(n) ; o["FB_HPKI"] = n
n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n
n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n
n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n
n = Metric_L3_Cache_Access_BW() ; r.metric(n) ; o["L3_Cache_Access_BW"] = n
n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n
n = Metric_Code_STLB_MPKI() ; r.metric(n) ; o["Code_STLB_MPKI"] = n
n = Metric_Load_STLB_MPKI() ; r.metric(n) ; o["Load_STLB_MPKI"] = n
n = Metric_Store_STLB_MPKI() ; r.metric(n) ; o["Store_STLB_MPKI"] = n
n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n
n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n
n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n
n = Metric_L3_Cache_Access_BW_2T() ; r.metric(n) ; o["L3_Cache_Access_BW_2T"] = n
n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n
n = Metric_Load_L3_Miss_Latency() ; r.metric(n) ; o["Load_L3_Miss_Latency"] = n
n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n
n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n
n = Metric_UC_Load_PKI() ; r.metric(n) ; o["UC_Load_PKI"] = n
n = Metric_Bus_Lock_PKI() ; r.metric(n) ; o["Bus_Lock_PKI"] = n
n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n
n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n
n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n
n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n
n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n
n = Metric_Power_License0_Utilization() ; r.metric(n) ; o["Power_License0_Utilization"] = n
n = Metric_Power_License1_Utilization() ; r.metric(n) ; o["Power_License1_Utilization"] = n
n = Metric_Power_License2_Utilization() ; r.metric(n) ; o["Power_License2_Utilization"] = n
n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n
n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n
n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n
n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n
n = Metric_Power() ; r.metric(n) ; o["Power"] = n
n = Metric_Time() ; r.metric(n) ; o["Time"] = n
n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n
n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n
# references between groups
o["Branch_Resteers"].Unknown_Branches = o["Unknown_Branches"]
o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"]
o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"]
o["Bad_Speculation"].Retiring = o["Retiring"]
o["Bad_Speculation"].Frontend_Bound = o["Frontend_Bound"]
o["Bad_Speculation"].Backend_Bound = o["Backend_Bound"]
o["Branch_Mispredicts"].Retiring = o["Retiring"]
o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"]
o["Branch_Mispredicts"].Frontend_Bound = o["Frontend_Bound"]
o["Branch_Mispredicts"].Backend_Bound = o["Backend_Bound"]
o["Other_Mispredicts"].Retiring = o["Retiring"]
o["Other_Mispredicts"].Backend_Bound = o["Backend_Bound"]
o["Other_Mispredicts"].Bad_Speculation = o["Bad_Speculation"]
o["Other_Mispredicts"].Frontend_Bound = o["Frontend_Bound"]
o["Other_Mispredicts"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Machine_Clears"].Retiring = o["Retiring"]
o["Machine_Clears"].Frontend_Bound = o["Frontend_Bound"]
o["Machine_Clears"].Backend_Bound = o["Backend_Bound"]
o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"]
o["Other_Nukes"].Machine_Clears = o["Machine_Clears"]
o["Other_Nukes"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Other_Nukes"].Retiring = o["Retiring"]
o["Other_Nukes"].Backend_Bound = o["Backend_Bound"]
o["Other_Nukes"].Bad_Speculation = o["Bad_Speculation"]
o["Other_Nukes"].Frontend_Bound = o["Frontend_Bound"]
o["Memory_Bound"].Retiring = o["Retiring"]
o["Memory_Bound"].Backend_Bound = o["Backend_Bound"]
o["Load_STLB_Hit"].Load_STLB_Miss = o["Load_STLB_Miss"]
o["Load_STLB_Hit"].DTLB_Load = o["DTLB_Load"]
o["DRAM_Bound"].L2_Bound = o["L2_Bound"]
o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Store_STLB_Hit"].DTLB_Store = o["DTLB_Store"]
o["Store_STLB_Hit"].Store_STLB_Miss = o["Store_STLB_Miss"]
o["Core_Bound"].Memory_Bound = o["Memory_Bound"]
o["Core_Bound"].Retiring = o["Retiring"]
o["Core_Bound"].Backend_Bound = o["Backend_Bound"]
o["Ports_Utilization"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Ports_Utilization"].Retiring = o["Retiring"]
o["Retiring"].Heavy_Operations = o["Heavy_Operations"]
o["Light_Operations"].Retiring = o["Retiring"]
o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"]
o["Light_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["FP_Arith"].Retiring = o["Retiring"]
o["FP_Arith"].FP_Scalar = o["FP_Scalar"]
o["FP_Arith"].X87_Use = o["X87_Use"]
o["FP_Arith"].FP_Vector = o["FP_Vector"]
o["X87_Use"].Retiring = o["Retiring"]
o["FP_Scalar"].Retiring = o["Retiring"]
o["FP_Vector"].Retiring = o["Retiring"]
o["FP_Vector_128b"].Retiring = o["Retiring"]
o["FP_Vector_256b"].Retiring = o["Retiring"]
o["FP_Vector_512b"].Retiring = o["Retiring"]
o["Memory_Operations"].Retiring = o["Retiring"]
o["Memory_Operations"].Light_Operations = o["Light_Operations"]
o["Memory_Operations"].Heavy_Operations = o["Heavy_Operations"]
o["Memory_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Branch_Instructions"].Retiring = o["Retiring"]
o["Branch_Instructions"].Light_Operations = o["Light_Operations"]
o["Branch_Instructions"].Heavy_Operations = o["Heavy_Operations"]
o["Branch_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Other_Light_Ops"].Light_Operations = o["Light_Operations"]
o["Other_Light_Ops"].Retiring = o["Retiring"]
o["Other_Light_Ops"].FP_Arith = o["FP_Arith"]
o["Other_Light_Ops"].Heavy_Operations = o["Heavy_Operations"]
o["Other_Light_Ops"].FP_Vector = o["FP_Vector"]
o["Other_Light_Ops"].FP_Scalar = o["FP_Scalar"]
o["Other_Light_Ops"].Branch_Instructions = o["Branch_Instructions"]
o["Other_Light_Ops"].X87_Use = o["X87_Use"]
o["Other_Light_Ops"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Other_Light_Ops"].Memory_Operations = o["Memory_Operations"]
o["Nop_Instructions"].Retiring = o["Retiring"]
o["Nop_Instructions"].Light_Operations = o["Light_Operations"]
o["Nop_Instructions"].Heavy_Operations = o["Heavy_Operations"]
o["Nop_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Heavy_Operations"].Retiring = o["Retiring"]
o["Heavy_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Few_Uops_Instructions"].Retiring = o["Retiring"]
o["Few_Uops_Instructions"].Heavy_Operations = o["Heavy_Operations"]
o["Few_Uops_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["CISC"].Assists = o["Assists"]
o["Mispredictions"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Mispredictions"].LCP = o["LCP"]
o["Mispredictions"].Retiring = o["Retiring"]
o["Mispredictions"].Other_Mispredicts = o["Other_Mispredicts"]
o["Mispredictions"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Mispredictions"].Frontend_Bound = o["Frontend_Bound"]
o["Mispredictions"].DSB_Switches = o["DSB_Switches"]
o["Mispredictions"].Backend_Bound = o["Backend_Bound"]
o["Mispredictions"].Branch_Resteers = o["Branch_Resteers"]
o["Mispredictions"].ICache_Misses = o["ICache_Misses"]
o["Mispredictions"].MS_Switches = o["MS_Switches"]
o["Mispredictions"].Bad_Speculation = o["Bad_Speculation"]
o["Mispredictions"].ITLB_Misses = o["ITLB_Misses"]
o["Mispredictions"].Unknown_Branches = o["Unknown_Branches"]
o["Mispredictions"].Fetch_Latency = o["Fetch_Latency"]
o["Mispredictions"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Big_Code"].LCP = o["LCP"]
o["Big_Code"].ICache_Misses = o["ICache_Misses"]
o["Big_Code"].DSB_Switches = o["DSB_Switches"]
o["Big_Code"].Branch_Resteers = o["Branch_Resteers"]
o["Big_Code"].MS_Switches = o["MS_Switches"]
o["Big_Code"].ITLB_Misses = o["ITLB_Misses"]
o["Big_Code"].Unknown_Branches = o["Unknown_Branches"]
o["Big_Code"].Fetch_Latency = o["Fetch_Latency"]
o["Instruction_Fetch_BW"].Retiring = o["Retiring"]
o["Instruction_Fetch_BW"].Other_Mispredicts = o["Other_Mispredicts"]
o["Instruction_Fetch_BW"].DSB_Switches = o["DSB_Switches"]
o["Instruction_Fetch_BW"].Assists = o["Assists"]
o["Instruction_Fetch_BW"].Backend_Bound = o["Backend_Bound"]
o["Instruction_Fetch_BW"].Branch_Resteers = o["Branch_Resteers"]
o["Instruction_Fetch_BW"].Heavy_Operations = o["Heavy_Operations"]
o["Instruction_Fetch_BW"].Fetch_Latency = o["Fetch_Latency"]
o["Instruction_Fetch_BW"].ICache_Misses = o["ICache_Misses"]
o["Instruction_Fetch_BW"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Instruction_Fetch_BW"].Frontend_Bound = o["Frontend_Bound"]
o["Instruction_Fetch_BW"].Bad_Speculation = o["Bad_Speculation"]
o["Instruction_Fetch_BW"].ITLB_Misses = o["ITLB_Misses"]
o["Instruction_Fetch_BW"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Instruction_Fetch_BW"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Instruction_Fetch_BW"].LCP = o["LCP"]
o["Instruction_Fetch_BW"].Few_Uops_Instructions = o["Few_Uops_Instructions"]
o["Instruction_Fetch_BW"].Clears_Resteers = o["Clears_Resteers"]
o["Instruction_Fetch_BW"].MS_Switches = o["MS_Switches"]
o["Instruction_Fetch_BW"].Unknown_Branches = o["Unknown_Branches"]
o["Cache_Memory_Bandwidth"].L1_Bound = o["L1_Bound"]
o["Cache_Memory_Bandwidth"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Cache_Memory_Bandwidth"].SQ_Full = o["SQ_Full"]
o["Cache_Memory_Bandwidth"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Cache_Memory_Bandwidth"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Cache_Memory_Bandwidth"].Retiring = o["Retiring"]
o["Cache_Memory_Bandwidth"].G4K_Aliasing = o["G4K_Aliasing"]
o["Cache_Memory_Bandwidth"].Data_Sharing = o["Data_Sharing"]
o["Cache_Memory_Bandwidth"].L2_Bound = o["L2_Bound"]
o["Cache_Memory_Bandwidth"].Memory_Bound = o["Memory_Bound"]
o["Cache_Memory_Bandwidth"].Lock_Latency = o["Lock_Latency"]
o["Cache_Memory_Bandwidth"].MEM_Latency = o["MEM_Latency"]
o["Cache_Memory_Bandwidth"].Backend_Bound = o["Backend_Bound"]
o["Cache_Memory_Bandwidth"].Store_Bound = o["Store_Bound"]
o["Cache_Memory_Bandwidth"].Split_Loads = o["Split_Loads"]
o["Cache_Memory_Bandwidth"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Cache_Memory_Bandwidth"].DTLB_Load = o["DTLB_Load"]
o["Cache_Memory_Bandwidth"].L3_Bound = o["L3_Bound"]
o["Cache_Memory_Bandwidth"].FB_Full = o["FB_Full"]
o["Cache_Memory_Bandwidth"].Contested_Accesses = o["Contested_Accesses"]
o["Cache_Memory_Bandwidth"].DRAM_Bound = o["DRAM_Bound"]
o["Cache_Memory_Latency"].L1_Bound = o["L1_Bound"]
o["Cache_Memory_Latency"].G4K_Aliasing = o["G4K_Aliasing"]
o["Cache_Memory_Latency"].Retiring = o["Retiring"]
o["Cache_Memory_Latency"].Data_Sharing = o["Data_Sharing"]
o["Cache_Memory_Latency"].L2_Bound = o["L2_Bound"]
o["Cache_Memory_Latency"].Contested_Accesses = o["Contested_Accesses"]
o["Cache_Memory_Latency"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Cache_Memory_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Cache_Memory_Latency"].Store_Latency = o["Store_Latency"]
o["Cache_Memory_Latency"].Backend_Bound = o["Backend_Bound"]
o["Cache_Memory_Latency"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Cache_Memory_Latency"].DTLB_Load = o["DTLB_Load"]
o["Cache_Memory_Latency"].False_Sharing = o["False_Sharing"]
o["Cache_Memory_Latency"].Streaming_Stores = o["Streaming_Stores"]
o["Cache_Memory_Latency"].Memory_Bound = o["Memory_Bound"]
o["Cache_Memory_Latency"].SQ_Full = o["SQ_Full"]
o["Cache_Memory_Latency"].Store_Bound = o["Store_Bound"]
o["Cache_Memory_Latency"].Split_Loads = o["Split_Loads"]
o["Cache_Memory_Latency"].L3_Bound = o["L3_Bound"]
o["Cache_Memory_Latency"].FB_Full = o["FB_Full"]
o["Cache_Memory_Latency"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Cache_Memory_Latency"].DTLB_Store = o["DTLB_Store"]
o["Cache_Memory_Latency"].Split_Stores = o["Split_Stores"]
o["Cache_Memory_Latency"].Lock_Latency = o["Lock_Latency"]
o["Cache_Memory_Latency"].MEM_Latency = o["MEM_Latency"]
o["Cache_Memory_Latency"].DRAM_Bound = o["DRAM_Bound"]
o["Memory_Data_TLBs"].L1_Bound = o["L1_Bound"]
o["Memory_Data_TLBs"].DTLB_Load = o["DTLB_Load"]
o["Memory_Data_TLBs"].False_Sharing = o["False_Sharing"]
o["Memory_Data_TLBs"].G4K_Aliasing = o["G4K_Aliasing"]
o["Memory_Data_TLBs"].Retiring = o["Retiring"]
o["Memory_Data_TLBs"].DTLB_Store = o["DTLB_Store"]
o["Memory_Data_TLBs"].L2_Bound = o["L2_Bound"]
o["Memory_Data_TLBs"].Memory_Bound = o["Memory_Bound"]
o["Memory_Data_TLBs"].Store_Bound = o["Store_Bound"]
o["Memory_Data_TLBs"].Split_Loads = o["Split_Loads"]
o["Memory_Data_TLBs"].L3_Bound = o["L3_Bound"]
o["Memory_Data_TLBs"].FB_Full = o["FB_Full"]
o["Memory_Data_TLBs"].Streaming_Stores = o["Streaming_Stores"]
o["Memory_Data_TLBs"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Memory_Data_TLBs"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Memory_Data_TLBs"].Store_Latency = o["Store_Latency"]
o["Memory_Data_TLBs"].Split_Stores = o["Split_Stores"]
o["Memory_Data_TLBs"].Lock_Latency = o["Lock_Latency"]
o["Memory_Data_TLBs"].Backend_Bound = o["Backend_Bound"]
o["Memory_Data_TLBs"].DRAM_Bound = o["DRAM_Bound"]
o["Memory_Synchronization"].L1_Bound = o["L1_Bound"]
o["Memory_Synchronization"].False_Sharing = o["False_Sharing"]
o["Memory_Synchronization"].Retiring = o["Retiring"]
o["Memory_Synchronization"].Frontend_Bound = o["Frontend_Bound"]
o["Memory_Synchronization"].Machine_Clears = o["Machine_Clears"]
o["Memory_Synchronization"].Data_Sharing = o["Data_Sharing"]
o["Memory_Synchronization"].Memory_Bound = o["Memory_Bound"]
o["Memory_Synchronization"].SQ_Full = o["SQ_Full"]
o["Memory_Synchronization"].Store_Bound = o["Store_Bound"]
o["Memory_Synchronization"].L3_Bound = o["L3_Bound"]
o["Memory_Synchronization"].L2_Bound = o["L2_Bound"]
o["Memory_Synchronization"].Streaming_Stores = o["Streaming_Stores"]
o["Memory_Synchronization"].Contested_Accesses = o["Contested_Accesses"]
o["Memory_Synchronization"].DTLB_Store = o["DTLB_Store"]
o["Memory_Synchronization"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Memory_Synchronization"].Store_Latency = o["Store_Latency"]
o["Memory_Synchronization"].Split_Stores = o["Split_Stores"]
o["Memory_Synchronization"].Backend_Bound = o["Backend_Bound"]
o["Memory_Synchronization"].Bad_Speculation = o["Bad_Speculation"]
o["Memory_Synchronization"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Memory_Synchronization"].Other_Nukes = o["Other_Nukes"]
o["Memory_Synchronization"].DRAM_Bound = o["DRAM_Bound"]
o["Compute_Bound_Est"].Serializing_Operation = o["Serializing_Operation"]
o["Compute_Bound_Est"].Ports_Utilization = o["Ports_Utilization"]
o["Compute_Bound_Est"].Retiring = o["Retiring"]
o["Compute_Bound_Est"].Ports_Utilized_2 = o["Ports_Utilized_2"]
o["Compute_Bound_Est"].Memory_Bound = o["Memory_Bound"]
o["Compute_Bound_Est"].Ports_Utilized_1 = o["Ports_Utilized_1"]
o["Compute_Bound_Est"].Core_Bound = o["Core_Bound"]
o["Compute_Bound_Est"].Backend_Bound = o["Backend_Bound"]
o["Compute_Bound_Est"].Ports_Utilized_3m = o["Ports_Utilized_3m"]
o["Compute_Bound_Est"].Divider = o["Divider"]
o["Compute_Bound_Est"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Irregular_Overhead"].Ports_Utilization = o["Ports_Utilization"]
o["Irregular_Overhead"].Retiring = o["Retiring"]
o["Irregular_Overhead"].ICache_Misses = o["ICache_Misses"]
o["Irregular_Overhead"].Heavy_Operations = o["Heavy_Operations"]
o["Irregular_Overhead"].Frontend_Bound = o["Frontend_Bound"]
o["Irregular_Overhead"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Irregular_Overhead"].Core_Bound = o["Core_Bound"]
o["Irregular_Overhead"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Irregular_Overhead"].Bad_Speculation = o["Bad_Speculation"]
o["Irregular_Overhead"].ITLB_Misses = o["ITLB_Misses"]
o["Irregular_Overhead"].Divider = o["Divider"]
o["Irregular_Overhead"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Irregular_Overhead"].Serializing_Operation = o["Serializing_Operation"]
o["Irregular_Overhead"].Machine_Clears = o["Machine_Clears"]
o["Irregular_Overhead"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Irregular_Overhead"].LCP = o["LCP"]
o["Irregular_Overhead"].Other_Mispredicts = o["Other_Mispredicts"]
o["Irregular_Overhead"].Few_Uops_Instructions = o["Few_Uops_Instructions"]
o["Irregular_Overhead"].DSB_Switches = o["DSB_Switches"]
o["Irregular_Overhead"].Memory_Bound = o["Memory_Bound"]
o["Irregular_Overhead"].Assists = o["Assists"]
o["Irregular_Overhead"].Backend_Bound = o["Backend_Bound"]
o["Irregular_Overhead"].Branch_Resteers = o["Branch_Resteers"]
o["Irregular_Overhead"].Clears_Resteers = o["Clears_Resteers"]
o["Irregular_Overhead"].MS_Switches = o["MS_Switches"]
o["Irregular_Overhead"].Other_Nukes = o["Other_Nukes"]
o["Irregular_Overhead"].Unknown_Branches = o["Unknown_Branches"]
o["Irregular_Overhead"].Fetch_Latency = o["Fetch_Latency"]
o["Other_Bottlenecks"].Retiring = o["Retiring"]
o["Other_Bottlenecks"].Data_Sharing = o["Data_Sharing"]
o["Other_Bottlenecks"].L2_Bound = o["L2_Bound"]
o["Other_Bottlenecks"].Contested_Accesses = o["Contested_Accesses"]
o["Other_Bottlenecks"].L3_Bound = o["L3_Bound"]
o["Other_Bottlenecks"].Machine_Clears = o["Machine_Clears"]
o["Other_Bottlenecks"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Other_Bottlenecks"].Store_Latency = o["Store_Latency"]
o["Other_Bottlenecks"].Other_Mispredicts = o["Other_Mispredicts"]
o["Other_Bottlenecks"].DSB_Switches = o["DSB_Switches"]
o["Other_Bottlenecks"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Other_Bottlenecks"].Ports_Utilized_1 = o["Ports_Utilized_1"]
o["Other_Bottlenecks"].Ports_Utilized_2 = o["Ports_Utilized_2"]
o["Other_Bottlenecks"].DTLB_Load = o["DTLB_Load"]
o["Other_Bottlenecks"].ICache_Misses = o["ICache_Misses"]
o["Other_Bottlenecks"].Streaming_Stores = o["Streaming_Stores"]
o["Other_Bottlenecks"].Memory_Bound = o["Memory_Bound"]
o["Other_Bottlenecks"].SQ_Full = o["SQ_Full"]
o["Other_Bottlenecks"].Store_Bound = o["Store_Bound"]
o["Other_Bottlenecks"].Bad_Speculation = o["Bad_Speculation"]
o["Other_Bottlenecks"].FB_Full = o["FB_Full"]
o["Other_Bottlenecks"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Other_Bottlenecks"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Other_Bottlenecks"].Split_Stores = o["Split_Stores"]
o["Other_Bottlenecks"].Few_Uops_Instructions = o["Few_Uops_Instructions"]
o["Other_Bottlenecks"].Other_Nukes = o["Other_Nukes"]
o["Other_Bottlenecks"].Unknown_Branches = o["Unknown_Branches"]
o["Other_Bottlenecks"].DRAM_Bound = o["DRAM_Bound"]
o["Other_Bottlenecks"].L1_Bound = o["L1_Bound"]
o["Other_Bottlenecks"].G4K_Aliasing = o["G4K_Aliasing"]
o["Other_Bottlenecks"].Core_Bound = o["Core_Bound"]
o["Other_Bottlenecks"].Divider = o["Divider"]
o["Other_Bottlenecks"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Other_Bottlenecks"].Assists = o["Assists"]
o["Other_Bottlenecks"].Backend_Bound = o["Backend_Bound"]
o["Other_Bottlenecks"].Branch_Resteers = o["Branch_Resteers"]
o["Other_Bottlenecks"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Other_Bottlenecks"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Other_Bottlenecks"].Fetch_Latency = o["Fetch_Latency"]
o["Other_Bottlenecks"].Ports_Utilization = o["Ports_Utilization"]
o["Other_Bottlenecks"].False_Sharing = o["False_Sharing"]
o["Other_Bottlenecks"].Heavy_Operations = o["Heavy_Operations"]
o["Other_Bottlenecks"].Frontend_Bound = o["Frontend_Bound"]
o["Other_Bottlenecks"].Serializing_Operation = o["Serializing_Operation"]
o["Other_Bottlenecks"].MEM_Latency = o["MEM_Latency"]
o["Other_Bottlenecks"].Split_Loads = o["Split_Loads"]
o["Other_Bottlenecks"].ITLB_Misses = o["ITLB_Misses"]
o["Other_Bottlenecks"].DTLB_Store = o["DTLB_Store"]
o["Other_Bottlenecks"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Other_Bottlenecks"].LCP = o["LCP"]
o["Other_Bottlenecks"].Lock_Latency = o["Lock_Latency"]
o["Other_Bottlenecks"].Clears_Resteers = o["Clears_Resteers"]
o["Other_Bottlenecks"].MS_Switches = o["MS_Switches"]
o["Other_Bottlenecks"].Ports_Utilized_3m = o["Ports_Utilized_3m"]
o["Useful_Work"].Assists = o["Assists"]
o["Useful_Work"].Retiring = o["Retiring"]
o["Useful_Work"].Heavy_Operations = o["Heavy_Operations"]
o["Useful_Work"].Few_Uops_Instructions = o["Few_Uops_Instructions"]
o["Useful_Work"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Core_Bound_Likely"].Memory_Bound = o["Memory_Bound"]
o["Core_Bound_Likely"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Core_Bound_Likely"].Core_Bound = o["Core_Bound"]
o["Core_Bound_Likely"].Backend_Bound = o["Backend_Bound"]
o["Core_Bound_Likely"].Retiring = o["Retiring"]
o["Core_Bound_Likely"].Ports_Utilization = o["Ports_Utilization"]
o["UopPI"].Retiring = o["Retiring"]
o["UpTB"].Retiring = o["Retiring"]
o["Retire"].Retiring = o["Retiring"]
o["DSB_Misses"].LSD = o["LSD"]
o["DSB_Misses"].MITE = o["MITE"]
o["DSB_Misses"].LCP = o["LCP"]
o["DSB_Misses"].Fetch_Bandwidth = o["Fetch_Bandwidth"]
o["DSB_Misses"].Frontend_Bound = o["Frontend_Bound"]
o["DSB_Misses"].DSB_Switches = o["DSB_Switches"]
o["DSB_Misses"].Branch_Resteers = o["Branch_Resteers"]
o["DSB_Misses"].ICache_Misses = o["ICache_Misses"]
o["DSB_Misses"].MS_Switches = o["MS_Switches"]
o["DSB_Misses"].ITLB_Misses = o["ITLB_Misses"]
o["DSB_Misses"].DSB = o["DSB"]
o["DSB_Misses"].Unknown_Branches = o["Unknown_Branches"]
o["DSB_Misses"].Fetch_Latency = o["Fetch_Latency"]
o["DSB_Bandwidth"].LSD = o["LSD"]
o["DSB_Bandwidth"].Fetch_Bandwidth = o["Fetch_Bandwidth"]
o["DSB_Bandwidth"].Frontend_Bound = o["Frontend_Bound"]
o["DSB_Bandwidth"].MITE = o["MITE"]
o["DSB_Bandwidth"].DSB = o["DSB"]
o["DSB_Bandwidth"].Fetch_Latency = o["Fetch_Latency"]
o["IC_Misses"].Fetch_Latency = o["Fetch_Latency"]
o["IC_Misses"].LCP = o["LCP"]
o["IC_Misses"].MS_Switches = o["MS_Switches"]
o["IC_Misses"].ICache_Misses = o["ICache_Misses"]
o["IC_Misses"].ITLB_Misses = o["ITLB_Misses"]
o["IC_Misses"].Unknown_Branches = o["Unknown_Branches"]
o["IC_Misses"].DSB_Switches = o["DSB_Switches"]
o["IC_Misses"].Branch_Resteers = o["Branch_Resteers"]
o["Branch_Misprediction_Cost"].Retiring = o["Retiring"]
o["Branch_Misprediction_Cost"].ICache_Misses = o["ICache_Misses"]
o["Branch_Misprediction_Cost"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Branch_Misprediction_Cost"].Frontend_Bound = o["Frontend_Bound"]
o["Branch_Misprediction_Cost"].Bad_Speculation = o["Bad_Speculation"]
o["Branch_Misprediction_Cost"].ITLB_Misses = o["ITLB_Misses"]
o["Branch_Misprediction_Cost"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Branch_Misprediction_Cost"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Branch_Misprediction_Cost"].LCP = o["LCP"]
o["Branch_Misprediction_Cost"].Other_Mispredicts = o["Other_Mispredicts"]
o["Branch_Misprediction_Cost"].DSB_Switches = o["DSB_Switches"]
o["Branch_Misprediction_Cost"].Backend_Bound = o["Backend_Bound"]
o["Branch_Misprediction_Cost"].Branch_Resteers = o["Branch_Resteers"]
o["Branch_Misprediction_Cost"].MS_Switches = o["MS_Switches"]
o["Branch_Misprediction_Cost"].Unknown_Branches = o["Unknown_Branches"]
o["Branch_Misprediction_Cost"].Fetch_Latency = o["Fetch_Latency"]
# siblings cross-tree
o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],)
o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],)
o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Serializing_Operation"], o["Mixing_Vectors"], o["Microcode_Sequencer"],)
o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],)
o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],)
o["Decoder0_Alone"].sibling = (o["Few_Uops_Instructions"],)
o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],)
o["Machine_Clears"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"], o["Microcode_Sequencer"],)
o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],)
o["DTLB_Load"].sibling = (o["DTLB_Store"],)
o["Lock_Latency"].sibling = (o["Store_Latency"],)
o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"], o["Streaming_Stores"],)
o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["False_Sharing"],)
o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["False_Sharing"],)
o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],)
o["L3_Hit_Latency"].overlap = True
o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],)
o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],)
o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],)
o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],)
o["Store_Latency"].overlap = True
o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"],)
o["Streaming_Stores"].sibling = (o["FB_Full"],)
o["DTLB_Store"].sibling = (o["DTLB_Load"],)
o["Serializing_Operation"].sibling = (o["MS_Switches"],)
o["Mixing_Vectors"].sibling = (o["MS_Switches"],)
o["Ports_Utilized_1"].sibling = (o["L1_Bound"],)
o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],)
o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],)
o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],)
o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],)
o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],)
o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],)
o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"],)
o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"], o["FP_Vector_512b"],)
o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_512b"],)
o["FP_Vector_512b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["Few_Uops_Instructions"].sibling = (o["Decoder0_Alone"],)
o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],)
o["Mispredictions"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],)
o["Cache_Memory_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
o["Cache_Memory_Latency"].sibling = (o["L3_Hit_Latency"], o["MEM_Latency"],)
o["Memory_Data_TLBs"].sibling = (o["DTLB_Load"], o["DTLB_Store"],)
o["Memory_Synchronization"].sibling = (o["DTLB_Load"], o["DTLB_Store"],)
o["Irregular_Overhead"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],)
o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Misses"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["Branch_Misprediction_Cost"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],)
o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
| 238,593 | Python | .py | 5,504 | 37.395712 | 1,797 | 0.656737 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,882 | bdx_server_ratios.py | andikleen_pmu-tools/bdx_server_ratios.py | # -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel Xeon E5 v4 (code named Broadwell EP)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#
# Helpers
# Runtime configuration hooks and defaults.
# These module-level globals are placeholders that the driver (e.g. toplev)
# overrides after import; the defaults below keep the module importable
# stand-alone. PEP 8 (E731): use a def, not an assigned lambda.
def print_error(msg):
    """Error sink; the driver replaces this with a real reporter. Returns False."""
    return False

smt_enabled = False        # True when SMT (Hyper-Threading) is active -- set by the driver
ebs_mode = False           # True in event-based-sampling mode -- set by the driver
version = "4.8-full-perf"  # TMA model version this file was generated from
base_frequency = -1.0      # presumably base frequency in kHz/MHz; -1 = unknown -- TODO confirm units
Memory = 0                 # NOTE(review): model knob; semantics not visible here -- confirm with generator
Average_Frequency = 0.0    # measured average frequency, filled in by the driver
num_cores = 1              # physical core count of the target -- set by the driver
num_threads = 1            # logical processor count -- set by the driver
num_sockets = 1            # populated socket count -- set by the driver
def handle_error(obj, msg):
    """Record a computation error on a tree node and force it to a safe state.

    Reports *msg* through the module-level print_error hook, bumps the
    node's error counter, and zeroes its value and threshold flag.
    """
    print_error(msg)
    obj.errcount += 1
    obj.val, obj.thresh = 0, False
def handle_error_metric(obj, msg):
    """Record a computation error on a metric node (metrics carry no threshold flag)."""
    print_error(msg)
    obj.val = 0
    obj.errcount += 1
# Constants
# Fixed cost/weight constants of the generated TMA 4.8 model for Broadwell EP.
# Values come from the generator; meanings below are inferred from the names
# -- confirm against the TMA description if they matter.
Exe_Ports = 8
Mem_L2_Store_Cost = 9
Mem_L3_Weight = 7
Mem_STLB_Hit_Cost = 8
BAClear_Cost = 12
MS_Switches_Cost = 2
Avg_Assist_Cost = 66
Pipeline_Width = 4         # issue/rename/retire slots per cycle on this core
OneMillion = 1000000
OneBillion = 1000000000
Energy_Unit = 61           # presumably the RAPL energy unit scale -- TODO confirm
Errata_Whitelist = "BDE69;BDE70"
EBS_Mode = 0
DS = 1
# Aux. formulas
def Backend_Bound_Cycles(self, EV, level):
    """Cycles attributed to the backend: total stalls plus executing cycles, minus the few-uops and front-end-empty components, plus store-buffer stalls (TMA 4.8 formula)."""
    return (EV("CYCLE_ACTIVITY.STALLS_TOTAL", level) + EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level) - Few_Uops_Executed_Threshold(self, EV, level) - Frontend_RS_Empty_Cycles(self, EV, level) + EV("RESOURCE_STALLS.SB", level))
def Cycles_0_Ports_Utilized(self, EV, level):
    """Cycles with no port utilized; core-level count is halved under SMT since the event counts for both threads."""
    return (EV("UOPS_EXECUTED.CORE:i1:c1", level)) / 2 if smt_enabled else(EV("CYCLE_ACTIVITY.STALLS_TOTAL", level) - Frontend_RS_Empty_Cycles(self, EV, level))
def Cycles_1_Port_Utilized(self, EV, level):
    """Cycles with exactly one uop executed (difference of the >=1 and >=2 cumulative counts)."""
    return (EV("UOPS_EXECUTED.CORE:c1", level) - EV("UOPS_EXECUTED.CORE:c2", level)) / 2 if smt_enabled else(EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level) - EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level))
def Cycles_2_Ports_Utilized(self, EV, level):
    """Cycles with exactly two uops executed (difference of the >=2 and >=3 cumulative counts)."""
    return (EV("UOPS_EXECUTED.CORE:c2", level) - EV("UOPS_EXECUTED.CORE:c3", level)) / 2 if smt_enabled else(EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level) - EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level))
def Cycles_3m_Ports_Utilized(self, EV, level):
    """Cycles with three or more uops executed."""
    return (EV("UOPS_EXECUTED.CORE:c3", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level)
def DurationTimeInSeconds(self, EV, level):
    """Length of the measurement interval in seconds (interval-ms pseudo-event / 1000)."""
    interval_ms = EV("interval-ms", 0)
    return interval_ms / 1000
def Execute_Cycles(self, EV, level):
    """Cycles where at least one uop executed; the core-level CORE:c1 count is halved under SMT."""
    return (EV("UOPS_EXECUTED.CORE:c1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level)
def Fetched_Uops(self, EV, level):
    """Total uops delivered by all front-end sources (DSB + LSD + MITE + MS)."""
    sources = ("IDQ.DSB_UOPS", "LSD.UOPS", "IDQ.MITE_UOPS", "IDQ.MS_UOPS")
    return sum(EV(name, level) for name in sources)
def Few_Uops_Executed_Threshold(self, EV, level):
    """Pick the >=3-uop or >=2-uop executed-cycles event depending on observed IPC."""
    # The two bare EV() calls appear to pre-register both events so either
    # branch of the IPC-dependent selection below can be evaluated -- the
    # return values are deliberately discarded here.
    EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level)
    EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level)
    return EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level) if (IPC(self, EV, level)> 1.8) else EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level)
# Floating Point computational (arithmetic) Operations Count
def FLOP_Count(self, EV, level):
    """Weighted FLOP total: scalar ops count 1, packed ops weighted 2/4/8 by elements per instruction."""
    scalar = EV("FP_ARITH_INST_RETIRED.SCALAR", level)
    packed2 = EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level)
    packed4 = EV("FP_ARITH_INST_RETIRED.4_FLOPS", level)
    packed8 = EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)
    return scalar + 2 * packed2 + 4 * packed4 + 8 * packed8
# Scalar Floating Point computational (arithmetic) Operations Count
def FP_Arith_Scalar(self, EV, level):
    """Count of retired scalar FP arithmetic instructions (FP_ARITH_INST_RETIRED.SCALAR)."""
    scalar_ops = EV("FP_ARITH_INST_RETIRED.SCALAR", level)
    return scalar_ops
# Vector (packed) Floating Point computational (arithmetic) Operations Count
def FP_Arith_Vector(self, EV, level):
    """Count of retired vector (packed) FP arithmetic instructions (FP_ARITH_INST_RETIRED.VECTOR)."""
    vector_ops = EV("FP_ARITH_INST_RETIRED.VECTOR", level)
    return vector_ops
def Frontend_RS_Empty_Cycles(self, EV, level):
    """RS-empty cycles, counted only when the Fetch_Latency node is significant (> 0.1)."""
    # Bare EV() call appears to pre-register the event unconditionally so it
    # is collected even when the threshold branch below evaluates to 0.
    EV("RS_EVENTS.EMPTY_CYCLES", level)
    return EV("RS_EVENTS.EMPTY_CYCLES", level) if (self.Fetch_Latency.compute(EV)> 0.1) else 0
def HighIPC(self, EV, level):
    """IPC normalized by the pipeline width, i.e. the fraction of peak issue rate achieved."""
    val = IPC(self, EV, level) / Pipeline_Width
    return val
def ITLB_Miss_Cycles(self, EV, level):
    """Estimated cycles lost to ITLB misses: 14 per STLB hit, plus page-walk active cycles, plus 7 per completed walk."""
    stlb_hits = EV("ITLB_MISSES.STLB_HIT", level)
    walk_cycles = EV("ITLB_MISSES.WALK_DURATION:c1", level)
    walks_done = EV("ITLB_MISSES.WALK_COMPLETED", level)
    return 14 * stlb_hits + walk_cycles + 7 * walks_done
def LOAD_L1_MISS(self, EV, level):
    """Retired load uops that missed L1D but were satisfied within the cache hierarchy (L2/L3/snoop responses)."""
    events = (
        "MEM_LOAD_UOPS_RETIRED.L2_HIT",
        "MEM_LOAD_UOPS_RETIRED.L3_HIT",
        "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
        "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
        "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
    )
    return sum(EV(name, level) for name in events)
def LOAD_L1_MISS_NET(self, EV, level):
    """All retired L1D-miss loads: cache-hierarchy hits plus local/remote DRAM and remote snoop responses."""
    return LOAD_L1_MISS(self, EV, level) + EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM", level) + EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM", level) + EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM", level) + EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD", level)
def LOAD_L3_HIT(self, EV, level):
    """L3-hit loads, scaled up by (1 + HIT_LFB / total L1 misses) to account for fill-buffer hits."""
    return EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_LCL_MEM(self, EV, level):
    """Local-DRAM loads, scaled by the same fill-buffer-hit factor."""
    return EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_RMT_FWD(self, EV, level):
    """Remote-cache-forwarded loads, scaled by the fill-buffer-hit factor."""
    return EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_RMT_HITM(self, EV, level):
    """Remote-HITM loads (modified line in a remote cache), scaled by the fill-buffer-hit factor."""
    return EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_RMT_MEM(self, EV, level):
    """Remote-DRAM loads, scaled by the fill-buffer-hit factor."""
    return EV("MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_XSNP_HIT(self, EV, level):
    """Cross-snoop clean-hit loads, scaled by the fill-buffer-hit factor."""
    return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_XSNP_HITM(self, EV, level):
    """Cross-snoop HITM loads, scaled by the fill-buffer-hit factor."""
    return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_XSNP_MISS(self, EV, level):
    """Cross-snoop miss loads, scaled by the fill-buffer-hit factor."""
    return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def Mem_L3_Hit_Fraction(self, EV, level):
    """Fraction of L3 accesses that hit, with misses weighted by Mem_L3_Weight (model constant)."""
    return EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) / (EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) + Mem_L3_Weight * EV("MEM_LOAD_UOPS_RETIRED.L3_MISS", level))
def Mem_Lock_St_Fraction(self, EV, level):
    """Ratio of retired locked loads to all retired stores."""
    lock_loads = EV("MEM_UOPS_RETIRED.LOCK_LOADS", level)
    all_stores = EV("MEM_UOPS_RETIRED.ALL_STORES", level)
    return lock_loads / all_stores
def Memory_Bound_Fraction(self, EV, level):
    """Share of backend-bound cycles attributable to memory (memory stalls + store-buffer stalls)."""
    return (EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", level) + EV("RESOURCE_STALLS.SB", level)) / Backend_Bound_Cycles(self, EV, level)
def Mispred_Clears_Fraction(self, EV, level):
    """Share of bad-speculation events caused by branch mispredicts (vs machine clears)."""
    # EV is invoked for BR_MISP twice, mirroring the generated formula's
    # event references exactly.
    numer = EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    denom = EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level)
    return numer / denom
def ORO_Demand_RFO_C1(self, EV, level):
    """Cycles with outstanding demand RFOs, capped at total thread cycles (the inner lambda lets EV evaluate the min over both events)."""
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level )
def ORO_DRD_Any_Cycles(self, EV, level):
    """Cycles with any outstanding demand data read, capped at total thread cycles."""
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level )
def ORO_DRD_BW_Cycles(self, EV, level):
    """Cycles with 4+ outstanding data reads (bandwidth-limited), capped at total thread cycles."""
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c4", level)) , level )
def SQ_Full_Cycles(self, EV, level):
    """Cycles the super queue was full; halved under SMT since the event counts per core."""
    return (EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) / 2) if smt_enabled else EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level)
def Store_L2_Hit_Cycles(self, EV, level):
    """Estimated cycles for L2-hit RFO stores, excluding the locked-store fraction."""
    return EV("L2_RQSTS.RFO_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level))
def Mem_XSNP_HitM_Cost(self, EV, level):
    """Model latency constant (cycles) for a cross-snoop HITM access on this CPU."""
    return 60
def Mem_XSNP_Hit_Cost(self, EV, level):
    """Model latency constant (cycles) for a cross-snoop clean hit."""
    return 43
def Mem_XSNP_None_Cost(self, EV, level):
    """Model latency constant (cycles) for an L3 hit with no snoop needed."""
    return 41
def Mem_Local_DRAM_Cost(self, EV, level):
    """Model latency constant (cycles) for a local DRAM access."""
    return 200
def Mem_Remote_DRAM_Cost(self, EV, level):
    """Model latency constant (cycles) for a remote-socket DRAM access."""
    return 310
def Mem_Remote_HitM_Cost(self, EV, level):
    """Model latency constant (cycles) for a remote-cache HITM access."""
    return 200
def Mem_Remote_Fwd_Cost(self, EV, level):
    """Model latency constant (cycles) for a remote-cache forward."""
    return 180
def Recovery_Cycles(self, EV, level):
    """Pipeline-recovery cycles after clears/mispredicts; the ANY (core) flavor is halved under SMT."""
    return (EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2) if smt_enabled else EV("INT_MISC.RECOVERY_CYCLES", level)
def Retire_Fraction(self, EV, level):
    """Fraction of issued uop slots that eventually retire."""
    return Retired_Slots(self, EV, level) / EV("UOPS_ISSUED.ANY", level)
# Retired slots per Logical Processor
def Retired_Slots(self, EV, level):
    """Retired uop slots for this logical processor (UOPS_RETIRED.RETIRE_SLOTS)."""
    slots = EV("UOPS_RETIRED.RETIRE_SLOTS", level)
    return slots
# Number of logical processors (enabled or online) on the target system
def Num_CPUs(self, EV, level):
    """Logical processor count for the model: hard-coded 4 cores, doubled when SMT is enabled."""
    return 8 if smt_enabled else 4
# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
    """Instructions retired per unhalted core cycle of this logical processor."""
    return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)
# Uops Per Instruction
def UopPI(self, EV, level):
    """Retired uop slots per retired instruction; side effect: sets self.thresh True when > 1.05."""
    val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
    self.thresh = (val > 1.05)
    return val
# Uops per taken branch
def UpTB(self, EV, level):
    """Retired uop slots per taken branch; side effect: sets self.thresh True when below 1.5x pipeline width."""
    val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 1.5
    return val
# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
    """Reciprocal of IPC."""
    return 1 / IPC(self, EV, level)
# Per-Logical Processor actual clocks when the Logical Processor is active.
def CLKS(self, EV, level):
    """Return per-logical-processor unhalted core clocks for this node level."""
    event = "CPU_CLK_UNHALTED.THREAD"
    return EV(event, level)
# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
    return Pipeline_Width * CORE_CLKS(self, EV, level)
# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_ISSUED.ANY", level)
# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)
# Floating Point Operations Per Cycle
def FLOPc(self, EV, level):
    return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level)
# Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to Fused-Multiply Add FMA counting - common.
def FP_Arith_Utilization(self, EV, level):
    # Divided by 2*CORE_CLKS: the model assumes two FP execution ports.
    return (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level)) / (2 * CORE_CLKS(self, EV, level))
# Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)
def ILP(self, EV, level):
    # :c1 cmask restricts the denominator to cycles with at least one uop executing.
    return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_EXECUTED.THREAD:c1", level)
# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
    # Three modes: ebs_mode reconstructs core clocks from per-thread events;
    # otherwise use the _ANY (both-threads) count halved under SMT; else plain CLKS.
    return ((EV("CPU_CLK_UNHALTED.THREAD", level) / 2) * (1 + EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_XCLK", level))) if ebs_mode else(EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2) if smt_enabled else CLKS(self, EV, level)
# Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level)
    self.thresh = (val < 3)
    return val
# Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level)
    self.thresh = (val < 8)
    return val
# Instructions per Branch (lower number means higher occurrence rate)
def IpBranch(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 8)
    return val
# Instructions per (near) call (lower number means higher occurrence rate)
def IpCall(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
    self.thresh = (val < 200)
    return val
# Instructions per taken branch
def IpTB(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 2 + 1
    return val
# Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes.
def BpTkBranch(self, EV, level):
    return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
# Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408
def IpFLOP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / FLOP_Count(self, EV, level)
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.
def IpArith(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level))
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_SP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level)
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_DP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level)
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX128(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX256(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val
# Total number of retired Instructions
def Instructions(self, EV, level):
    return EV("INST_RETIRED.ANY", level)
# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
    return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level)
def Execute(self, EV, level):
    # Average uops executed per cycle with any execution (per-thread).
    return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level)
# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    # Only interesting when the workload is already high-IPC (front-end bound-ish).
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val
# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
    return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)
# Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    self.thresh = (val < 200)
    return val
# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    # BR_MISP_EXEC counts at execute; scale by Retire_Fraction to approximate retired mispredicts.
    val = Instructions(self, EV, level) / (Retire_Fraction(self, EV, level) * EV("BR_MISP_EXEC.INDIRECT", level))
    self.thresh = (val < 1000)
    return val
# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / (EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level) + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level))
# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)
# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    """L1 data-cache true misses per kilo instruction, for retired demand loads."""
    misses = EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level)
    instructions = EV("INST_RETIRED.ANY", level)
    return 1000 * misses / instructions
# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache misses per kilo instruction for all request types (including speculative)
def L2MPKI_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache misses per kilo instruction for all demand loads (including speculative)
def L2MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level) / EV("INST_RETIRED.ANY", level)
# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("OFFCORE_REQUESTS.DEMAND_RFO", level) / EV("INST_RETIRED.ANY", level)
# L2 cache hits per kilo instruction for all request types (including speculative)
def L2HPKI_All(self, EV, level):
    # Hits = references minus misses.
    return 1000 *(EV("L2_RQSTS.REFERENCES", level) - EV("L2_RQSTS.MISS", level)) / EV("INST_RETIRED.ANY", level)
# L2 cache hits per kilo instruction for all demand loads (including speculative)
def L2HPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level) / EV("INST_RETIRED.ANY", level)
# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_UOPS_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)
# Fill bandwidths below: 64 bytes per fill line, reported in GB/s over the sample interval.
def L1D_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)
def L2_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)
def L3_Cache_Fill_BW(self, EV, level):
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)
# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    # Walk durations plus a fixed 7-cycle estimate per completed walk, over core clocks of both walkers.
    val = (EV("ITLB_MISSES.WALK_DURATION", level) + EV("DTLB_LOAD_MISSES.WALK_DURATION", level) + EV("DTLB_STORE_MISSES.WALK_DURATION", level) + 7 *(EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) + EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) + EV("ITLB_MISSES.WALK_COMPLETED", level))) / (2 * CORE_CLKS(self, EV, level))
    self.thresh = (val > 0.5)
    return val
# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    return L1D_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    return L2_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    return L3_Cache_Fill_BW(self, EV, level)
# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    # Occupancy / inserts = average residency (in cycles) per request.
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)
# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", level)
# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)
# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)
# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    # "msr/tsc/" is the per-interval TSC count provided by the driver.
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)
# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)
# Measured Average Uncore Frequency for the SoC [GHz]
def Uncore_Frequency(self, EV, level):
    return Socket_CLKS(self, EV, level) / 1e9 / Time(self, EV, level)
# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)
# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)
# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / (EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2) if smt_enabled else 0
# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    # :SUP restricts the numerator to supervisor (ring-0) cycles.
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val
# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)
# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    # 64 bytes per CAS transaction, summed over IMC read + write CAS counts.
    return (64 *(EV("UNC_M_CAS_COUNT.RD", level) + EV("UNC_M_CAS_COUNT.WR", level)) / OneBillion) / Time(self, EV, level)
# Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches.
def MEM_Read_Latency(self, EV, level):
    # CBo TOR occupancy/inserts for opcode 0x182 (data reads), converted to ns via uncore clock rate.
    return OneBillion *(EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:opc=0x182", level) / EV("UNC_C_TOR_INSERTS.MISS_OPCODE:opc=0x182", level)) / (Socket_CLKS(self, EV, level) / Time(self, EV, level))
# Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches
def MEM_Parallel_Reads(self, EV, level):
    return EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:opc=0x182", level) / EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:opc=0x182:c1", level)
# Run duration time in seconds
def Time(self, EV, level):
    """Run duration of the sample interval in seconds.

    Sets self.thresh when the interval is shorter than 1s (too short for
    reliable ratios)."""
    seconds = EV("interval-s", 0)
    self.thresh = seconds < 1
    return seconds
# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    # CBo clockticks from a single unit, used as the uncore clock reference.
    return EV("UNC_C_CLOCKTICKS:one_unit", level)
# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    # :USER restricts the far-branch count to user mode.
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = (val < 1000000)
    return val
# Event groups
class Frontend_Bound:
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    # TMA level-1 node: undelivered front-end slots / total slots.
    def compute(self, EV):
        try:
            self.val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the
processor's Frontend undersupplies its Backend. Frontend
denotes the first part of the processor core responsible to
fetch operations that are executed later on by the Backend
part. Within the Frontend; a branch predictor predicts the
next address to fetch; cache-lines are fetched from the
memory subsystem; parsed into instructions; and lastly
decoded into micro-operations (uops). Ideally the Frontend
can issue Pipeline_Width uops every cycle to the Backend.
Frontend Bound denotes unutilized issue-slots when there is
no Backend stall; i.e. bubbles where Frontend delivered no
uops while Backend could have accepted them. For example;
stalls due to instruction-cache misses would be categorized
under Frontend Bound."""
class Fetch_Latency:
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['RS_EVENTS.EMPTY_END']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None
    # Level-2 node: cycles where the front-end delivered zero uops, scaled to slots.
    def compute(self, EV):
        try:
            self.val = Pipeline_Width * EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend latency issues. For example; instruction-
cache misses; iTLB misses or fetch stalls after a branch
misprediction are categorized under Frontend Latency. In
such cases; the Frontend eventually delivers no uops for
some period."""
class ICache_Misses:
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None
    # Level-3 node: i-cache fetch-data stall cycles over core clocks.
    def compute(self, EV):
        try:
            self.val = EV("ICACHE.IFDATA_STALL", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to instruction cache misses.. Using compiler's
Profile-Guided Optimization (PGO) can reduce i-cache misses
through improved hot code layout."""
class ITLB_Misses:
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['ITLB_MISSES.WALK_COMPLETED']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    # Level-3 node: ITLB miss cycles (helper defined elsewhere in this file) over core clocks.
    def compute(self, EV):
        try:
            self.val = ITLB_Miss_Cycles(self, EV, 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Instruction TLB (ITLB) misses.. Consider
large 2M pages for code (selectively prefer hot large-size
function, due to limited 2M entries). Linux options:
standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public
ations/optimizing-function-placement-for-large-scale-data-
center-applications-2/"""
class Branch_Resteers:
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    # Level-3 node: estimated resteer cycles = fixed BAClear_Cost per (mispredict + clear + BAClear).
    def compute(self, EV):
        try:
            self.val = BAClear_Cost *(EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) + EV("MACHINE_CLEARS.COUNT", 3) + EV("BACLEARS.ANY", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers. Branch Resteers estimates
the Frontend delay in fetching operations from corrected
path; following all sorts of miss-predicted branches. For
example; branchy code with lots of miss-predictions might
get categorized under Branch Resteers. Note the value of
this node may overlap with its siblings."""
class Mispredicts_Resteers:
    name = "Mispredicts_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP'])
    maxval = None
    # Level-4 node: the mispredict share of Branch_Resteers (proportional split by event counts).
    def compute(self, EV):
        try:
            self.val = EV("BR_MISP_RETIRED.ALL_BRANCHES", 4) * self.Branch_Resteers.compute(EV) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", 4) + EV("MACHINE_CLEARS.COUNT", 4) + EV("BACLEARS.ANY", 4))
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Mispredicts_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Branch
Misprediction at execution stage."""
class Clears_Resteers:
    name = "Clears_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'MachineClears'])
    maxval = None
    # Level-4 node: the machine-clear share of Branch_Resteers (same proportional split).
    def compute(self, EV):
        try:
            self.val = EV("MACHINE_CLEARS.COUNT", 4) * self.Branch_Resteers.compute(EV) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", 4) + EV("MACHINE_CLEARS.COUNT", 4) + EV("BACLEARS.ANY", 4))
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Clears_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Machine
Clears."""
class Unknown_Branches:
    name = "Unknown_Branches"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['BACLEARS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat'])
    maxval = None
    # Level-4 node: remainder of Branch_Resteers after the two explicit shares above.
    def compute(self, EV):
        try:
            self.val = self.Branch_Resteers.compute(EV) - self.Mispredicts_Resteers.compute(EV) - self.Clears_Resteers.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Unknown_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to new branch address clears. These are fetched
branches the Branch Prediction Unit was unable to recognize
(e.g. first time the branch is fetched or hitting BTB
capacity limit) hence called Unknown Branches"""
class MS_Switches:
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    # Level-3 node: estimated cost of each microcode-sequencer switch (fixed MS_Switches_Cost).
    def compute(self, EV):
        try:
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU
was stalled due to switches of uop delivery to the Microcode
Sequencer (MS). Commonly used instructions are optimized for
delivery by the DSB (decoded i-cache) or MITE (legacy
instruction decode) pipelines. Certain operations cannot be
handled natively by the execution pipeline; and must be
performed by microcode (small programs injected into the
execution stream). Switching to the MS too often can
negatively impact performance. The MS is designated to
deliver long uop flows required by CISC instructions like
CPUID; or uncommon conditions like Floating Point Assists
when dealing with Denormals."""
class LCP:
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    # Level-3 node: length-changing-prefix stall cycles over core clocks.
    def compute(self, EV):
        try:
            self.val = EV("ILD_STALL.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled
due to Length Changing Prefixes (LCPs). Using proper
compiler flags or Intel Compiler by default will certainly
avoid this."""
class DSB_Switches:
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    # Level-3 node: DSB-to-MITE switch penalty cycles over core clocks.
    def compute(self, EV):
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to switches from DSB to MITE pipelines. The DSB
(decoded i-cache) is a Uop Cache where the front-end
directly delivers Uops (micro operations) avoiding heavy x86
decoding. The DSB pipeline has shorter latency and delivered
higher bandwidth than the MITE (legacy instruction decode
pipeline). Switching between the two pipelines can cause
penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class Fetch_Bandwidth:
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    # Level-2 node: derived as Frontend_Bound minus Fetch_Latency (what's left is bandwidth).
    def compute(self, EV):
        try:
            self.val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend bandwidth issues. For example;
inefficiencies at the instruction decoders; or restrictions
for caching in the DSB (decoded uops cache) are categorized
under Fetch Bandwidth. In such cases; the Frontend typically
delivers suboptimal amount of uops to the Backend."""
class MITE:
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    # Level-3 node: MITE cycles delivering fewer than 4 uops, per core clock (halved: shared decoder).
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.ALL_MITE_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_MITE_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to the MITE pipeline (the legacy
decode pipeline). This pipeline is used for code that was
not pre-cached in the DSB or LSD. For example;
inefficiencies due to asymmetric decoders; use of long
immediate or LCP can manifest as MITE fetch bandwidth
bottleneck.. Consider tuning codegen of 'small hotspots'
that can fit in DSB. Read about 'Decoded ICache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class DSB:
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None
    # Level-3 node: DSB cycles delivering fewer than 4 uops, per core clock (halved: shared structure).
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.ALL_DSB_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_DSB_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to DSB (decoded uop cache) fetch
pipeline. For example; inefficient utilization of the DSB
cache structure or bank conflict when reading from it; are
categorized here."""
class Bad_Speculation:
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    # TMA level-1 node: (issued - retired + recovery bubbles) slots over total slots.
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""
class Branch_Mispredicts:
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    # Level-2 node: mispredict fraction of Bad_Speculation (split via Mispred_Clears_Fraction helper).
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction. These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Machine_Clears:
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    # Level-2 node: remainder of Bad_Speculation after Branch_Mispredicts.
    def compute(self, EV):
        try:
            self.val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears. These slots are either wasted by
uops fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:. https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""
class Backend_Bound:
name = "Backend_Bound"
domain = "Slots"
area = "BE"
level = 1
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['BvOB', 'TmaL1'])
maxval = None
def compute(self, EV):
try:
self.val = 1 -(self.Frontend_Bound.compute(EV) + self.Bad_Speculation.compute(EV) + self.Retiring.compute(EV))
self.thresh = (self.val > 0.2)
except ZeroDivisionError:
handle_error(self, "Backend_Bound zero division")
return self.val
desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. Backend is the portion of
the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""
class Memory_Bound:
name = "Memory_Bound"
domain = "Slots"
area = "BE/Mem"
level = 2
htoff = False
sample = []
errcount = 0
sibling = None
metricgroup = frozenset(['Backend', 'TmaL2'])
maxval = None
def compute(self, EV):
try:
self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
self.thresh = (self.val > 0.2) and self.parent.thresh
except ZeroDivisionError:
handle_error(self, "Memory_Bound zero division")
return self.val
desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck. Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""
class L1_Bound:
    """TMA level-3 node: cycles stalled on memory without outstanding L1D load misses (i.e. loads satisfied by L1). See `desc`."""
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L1_HIT:pp', 'MEM_LOAD_UOPS_RETIRED.HIT_LFB:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        # max(..., 0) clamps away negative values the event subtraction can produce.
        try:
            self.val = max((EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3) , 0 )
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled without
loads missing the L1 data cache.  The L1 data cache
typically has the shortest latency.  However; in certain
cases like loads blocked on older stores; a load might
suffer due to high latency even though it is being satisfied
by the L1. Another example is loads who miss in the TLB.
These cases are characterized by execution unit stalls;
while some non-completed demand load lives in the machine
without having that demand load missing the L1 cache."""
class DTLB_Load:
    """TMA level-4 node: estimated fraction of cycles lost to DTLB misses on load accesses (STLB hits plus page walks). See `desc`."""
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        # Mem_STLB_Hit_Cost and the 7-cycle walk-completion cost are fixed
        # penalty estimates from Intel's TMA model for this microarchitecture.
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT", 4) + EV("DTLB_LOAD_MISSES.WALK_DURATION:c1", 4) + 7 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", 4)) / CLKS(self, EV, 4)
        self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages). This includes hitting in the second-level TLB (STLB)
as well as performing a hardware page walk on an STLB miss.."""
class Store_Fwd_Blk:
    """TMA level-4 node: estimated cycles loads were blocked on failed store-forwarding. See `desc`."""
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        # 13 cycles is the model's fixed per-blocked-load penalty estimate.
        try:
            self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles when the
memory subsystem had loads blocked since they could not
forward data from earlier (in program order) overlapping
stores. To streamline memory operations in the pipeline; a
load can avoid waiting for memory if a prior in-flight store
is writing the data that the load wants to read (store
forwarding process). However; in some cases the load may be
blocked for a significant time pending the store forward.
For example; when the prior store is writing a smaller
region than the load is reading."""
class Lock_Latency:
    """TMA level-4 node: cycles spent handling cache misses caused by locked (atomic) load operations. See `desc`."""
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.LOCK_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0
    def compute(self, EV):
        # Scales outstanding demand-RFO cycles by the fraction of stores that are locked.
        try:
            self.val = Mem_Lock_St_Fraction(self, EV, 4) * ORO_Demand_RFO_C1(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU spent
handling cache misses due to lock operations. Due to the
microarchitecture handling of locks; they are classified as
L1_Bound regardless of what memory source satisfied them."""
class Split_Loads:
    """TMA level-4 node: estimated cycles handling loads that split a 64-byte cache-line boundary. See `desc`."""
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        # LD_BLOCKS.NO_SR counts loads blocked for lack of a split register.
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles handling memory
load split accesses - load that cross 64-byte cache line
boundary. . Consider aligning data or hot structure fields.
See the Optimization Manual for more details"""
class G4K_Aliasing:
    """TMA level-4 node: estimated cycles lost to false 4K-address aliasing between loads and earlier stores. Class is prefixed 'G' because the metric name '4K_Aliasing' is not a valid Python identifier. See `desc`."""
    name = "4K_Aliasing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "G4K_Aliasing zero division")
        return self.val
    desc = """
This metric estimates how often memory load accesses were
aliased by preceding stores (in program order) with a 4K
address offset. False match is possible; which incur a few
cycles load re-issue. However; the short re-issue duration
is often hidden by the out-of-order core and HW
optimizations; hence a user may safely ignore a high value
of this metric unless it manages to propagate up into parent
nodes of the hierarchy (e.g. to L1_Bound).. Consider
reducing independent loads/stores accesses with 4K offsets.
See the Optimization Manual for more details"""
class FB_Full:
    """TMA level-4 node: estimate of how often L1D fill-buffer exhaustion limited further L1D-miss requests. See `desc`."""
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None
    def compute(self, EV):
        # Unlike sibling nodes, the threshold here is NOT gated on parent.thresh
        # (as generated by the TMA model); values > 1 are considered valid.
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("L1D_PEND_MISS.FB_FULL:c1", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val
    desc = """
This metric does a *rough estimation* of how often L1D Fill
Buffer unavailability limited additional L1D miss memory
access requests to proceed. The higher the metric value; the
deeper the memory hierarchy level the misses are satisfied
from (metric values >1 are valid). Often it hints on
approaching bandwidth limits (to L2 cache; L3 cache or
external memory).. See $issueBW and $issueSL hints. Avoid
software prefetches if indeed memory BW limited."""
class L2_Bound:
    """TMA level-3 node: cycles stalled on loads satisfied by the L2 cache (L1 miss / L2 hit). See `desc`."""
    name = "L2_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L2_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        # L1D-miss stalls minus L2-miss stalls = stalls resolved at L2.
        try:
            self.val = (EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", 3) - EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L2_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
L2 cache accesses by loads.  Avoiding cache misses (i.e. L1
misses/L2 hits) can improve the latency and increase
performance."""
class L3_Bound:
    """TMA level-3 node: cycles stalled on loads satisfied by the L3 cache (or contended with a sibling core). See `desc`."""
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        # L2-miss stalls are split between L3 and DRAM by the L3-hit fraction helper.
        try:
            self.val = Mem_L3_Hit_Fraction(self, EV, 3) * EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
loads accesses to L3 cache or contended with a sibling Core.
Avoiding cache misses (i.e. L2 misses/L3 hits) can improve
the latency and increase performance."""
class Contested_Accesses:
    """TMA level-4 node: estimated cycles handling contested cross-core accesses (snoop HitM / snoop miss). See `desc`."""
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        # Weighted sum of snoop-HitM and snoop-hit costs over their load counts.
        try:
            self.val = (Mem_XSNP_HitM_Cost(self, EV, 4) * LOAD_XSNP_HITM(self, EV, 4) + Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_MISS(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to contested
accesses. Contested accesses occur when data written by one
Logical Processor are read by another Logical Processor on a
different Physical Core. Examples of contested accesses
include synchronizations such as locks; true data sharing
such as modified locked variables; and false sharing."""
class Data_Sharing:
    """TMA level-4 node: estimated cycles handling clean data-sharing (cross-snoop hit) accesses. See `desc`."""
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to data-sharing
accesses. Data shared by multiple Logical Processors (even
just read shared) may cause increased access latency due to
cache coherency. Excessive data sharing can drastically harm
multithreaded performance."""
class L3_Hit_Latency:
    """TMA level-4 node: estimated cycles from demand loads hitting L3 without snoop involvement (L3 latency limited). See `desc`."""
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_None_Cost(self, EV, 4) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles with demand load
accesses that hit the L3 cache under unloaded scenarios
(possibly L3 latency limited).  Avoiding private cache
misses (i.e. L2 misses/L3 hits) will improve the latency;
reduce contention with sibling physical cores and increase
performance.  Note the value of this node may overlap with
its siblings."""
class SQ_Full:
    """TMA level-4 node: fraction of core cycles the offcore Super Queue was full. See `desc`."""
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        # Uses per-physical-core clocks since the SQ is shared by both SMT threads.
        try:
            self.val = SQ_Full_Cycles(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """
This metric measures fraction of cycles where the Super
Queue (SQ) was full taking into account all request-types
and both hardware SMT threads (Logical Processors)."""
class DRAM_Bound:
    """TMA level-3 node: cycles stalled on loads going to external DRAM (L3 misses). See `desc`."""
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L3_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        # Complement of L3_Bound: the L3-*miss* share of L2-miss stall cycles.
        try:
            self.val = (1 - Mem_L3_Hit_Fraction(self, EV, 3)) * EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled on
accesses to external memory (DRAM) by loads. Better caching
can improve the latency and increase performance."""
class MEM_Bandwidth:
    """TMA level-4 node: cycles likely limited by approaching external-memory bandwidth limits. See `desc`."""
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        # Cycles with many outstanding demand data reads (high-occupancy heuristic).
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the core's
performance was likely hurt due to approaching bandwidth
limits of external memory - DRAM ([SPR-HBM] and/or HBM).
The underlying heuristic assumes that a similar off-core
traffic is generated by all IA cores. This metric does not
aggregate non-data-read requests by this logical processor;
requests from other IA Logical Processors/Physical
Cores/sockets; or other non-IA devices like GPU; hence the
maximum external memory bandwidth limits may or may not be
approached when this metric is flagged (see Uncore counters
for that).. Improve data accesses to reduce cacheline
transfers from/to memory. Examples: 1) Consume all bytes of
a each cacheline before it is evicted (e.g. reorder
structure elements and split non-hot ones), 2) merge
computed-limited with BW-limited loops, 3) NUMA
optimizations in multi-socket system. Note: software
prefetches will not help BW-limited application.."""
class MEM_Latency:
    """TMA level-4 node: cycles likely limited by external-memory latency (any outstanding read, minus the bandwidth component). See `desc`."""
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        # Subtracts the sibling MEM_Bandwidth node so latency and bandwidth don't double-count.
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the
performance was likely hurt due to latency from external
memory - DRAM ([SPR-HBM] and/or HBM).  This metric does not
aggregate requests from other Logical Processors/Physical
Cores/sockets (see Uncore counters for that).. Improve data
accesses or interleave them with compute. Examples: 1) Data
layout re-structuring, 2) Software Prefetches (also through
the compiler).."""
class Local_MEM:
    """TMA level-5 node (server): estimated cycles handling loads served from local-socket DRAM. See `desc`."""
    name = "Local_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_Local_DRAM_Cost(self, EV, 5) * LOAD_LCL_MEM(self, EV, 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Local_MEM zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from local memory. Caching will
improve the latency and increase performance."""
class Remote_MEM:
    """TMA level-5 node (server): estimated cycles handling loads served from remote-socket DRAM (NUMA). See `desc`."""
    name = "Remote_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_Remote_DRAM_Cost(self, EV, 5) * LOAD_RMT_MEM(self, EV, 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_MEM zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from remote memory. This is
caused often due to non-optimal NUMA allocations."""
class Remote_Cache:
    """TMA level-5 node (server): estimated cycles handling loads served from a remote socket's cache (HitM / forward). See `desc`."""
    name = "Remote_Cache"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM:pp', 'MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore', 'Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        # Weighted sum of remote-HitM and remote-forward costs over their load counts.
        try:
            self.val = (Mem_Remote_HitM_Cost(self, EV, 5) * LOAD_RMT_HITM(self, EV, 5) + Mem_Remote_Fwd_Cost(self, EV, 5) * LOAD_RMT_FWD(self, EV, 5)) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_Cache zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from remote cache in other
sockets including synchronizations issues. This is caused
often due to non-optimal NUMA allocations."""
class Store_Bound:
    """TMA level-3 node: cycles stalled on RFO store memory accesses (store-buffer full). See `desc`."""
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_UOPS_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        # RESOURCE_STALLS.SB counts cycles allocation stalled on a full store buffer.
        try:
            self.val = EV("RESOURCE_STALLS.SB", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled  due to RFO
store memory accesses; RFO store issue a read-for-ownership
request before the write. Even though store accesses do not
typically stall out-of-order CPUs; there are few cases where
stores can lead to actual stalls. This metric will be
flagged should RFO stores be a bottleneck."""
class Store_Latency:
    """TMA level-4 node: estimated cycles spent handling L1D store misses (non-locked RFO latency plus L2-hit stores). See `desc`."""
    name = "Store_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        # (1 - lock fraction) excludes locked stores, which Lock_Latency accounts for.
        try:
            self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU spent
handling L1D store misses. Store accesses usually less
impact out-of-order core performance; however; holding
resources for longer time can lead into undesired
implications (e.g. contention on L1D fill-buffer entries -
see FB_Full). Consider to avoid/reduce unnecessary (or
easily load-able/computable) memory store."""
class False_Sharing:
    """TMA level-4 node: estimated cycles handling false-sharing synchronizations (RFOs hitting modified lines in other cores/sockets). See `desc`."""
    name = "False_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM:pp', 'OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE', 'OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        # Remote (cross-socket) and local (cross-core) HitM RFOs, each weighted by its model cost.
        try:
            self.val = (Mem_Remote_HitM_Cost(self, EV, 4) * EV("OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM", 4) + Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "False_Sharing zero division")
        return self.val
    desc = """
This metric roughly estimates how often CPU was handling
synchronizations due to False Sharing. False Sharing is a
multithreading hiccup; where multiple Logical Processors
contend on different data-elements mapped into the same
cache line. . False Sharing can be easily avoided by padding
to make Logical Processors access different lines."""
class Split_Stores:
    """TMA level-4 node: rate of stores that split a 64-byte cache-line boundary. See `desc`."""
    name = "Split_Stores"
    domain = "Core_Utilization"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.SPLIT_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        # Factor of 2 is the model's per-split-store penalty weight.
        try:
            self.val = 2 * EV("MEM_UOPS_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Stores zero division")
        return self.val
    desc = """
This metric represents rate of split store accesses.
Consider aligning your data to the 64-byte cache line
granularity."""
class DTLB_Store:
    """TMA level-4 node: estimated cycles handling first-level DTLB store misses (STLB hits plus page walks). See `desc`."""
    name = "DTLB_Store"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        # Same cost model as DTLB_Load: STLB-hit cost constant plus walk duration
        # plus a 7-cycle per-completed-walk estimate.
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT", 4) + EV("DTLB_STORE_MISSES.WALK_DURATION:c1", 4) + 7 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Store zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles spent
handling first-level data TLB store misses.  As with
ordinary data caching; focus on improving data locality and
reducing working-set size to reduce DTLB overhead.
Additionally; consider using profile-guided optimization
(PGO) to collocate frequently-used data on the same page.
Try using larger page sizes for large amounts of frequently-
used data."""
class Core_Bound:
    """TMA level-2 node (Backend): non-memory backend bottlenecks, derived as Backend_Bound minus Memory_Bound. See `desc`."""
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots where Core non-
memory issues were of a bottleneck.  Shortage in hardware
compute resources; or dependencies in software's
instructions are both categorized under Core Bound. Hence it
may indicate the machine ran out of an out-of-order
resource; certain execution units are overloaded or
dependencies in program's data- or instruction-flow are
limiting the performance (e.g. FP-chained long-latency
arithmetic operations).. Tip: consider Port Saturation
analysis as next step."""
class Divider:
    """TMA level-3 node: fraction of core cycles the FP/integer divider unit was active. See `desc`."""
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['ARITH.FPU_DIV_ACTIVE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("ARITH.FPU_DIV_ACTIVE", 3) / CORE_CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the Divider
unit was active. Divide and square root instructions are
performed by the Divider unit and can take considerably
longer latency than integer or Floating Point addition;
subtraction; or multiplication."""
class Ports_Utilization:
    """TMA level-3 node: cycles limited by execution-port pressure / low ILP (backend stalls not attributed to memory or the store buffer). See `desc`."""
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        # Backend-bound cycles with store-buffer and memory stalls carved out,
        # leaving the core-execution (ports) component.
        try:
            self.val = (Backend_Bound_Cycles(self, EV, 3) - EV("RESOURCE_STALLS.SB", 3) - EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU performance
was potentially limited due to Core computation issues (non
divider-related).  Two distinct categories can be attributed
into this metric: (1) heavy data-dependency among contiguous
instructions would manifest in this metric - such cases are
often referred to as low Instruction Level Parallelism
(ILP). (2) Contention on some hardware execution unit other
than Divider. For example; when there are too many multiply
operations.. Loop Vectorization -most compilers feature
auto-Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
class Ports_Utilized_0:
    """TMA level-4 node: fraction of cycles with zero uops executed on any port. See `desc`."""
    name = "Ports_Utilized_0"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Cycles_0_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_0 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed no
uops on any execution port (Logical Processor cycles since
ICL, Physical Core cycles otherwise). Long-latency
instructions like divides may contribute to this metric..
Check assembly view and Appendix C in Optimization Manual to
find out instructions with say 5 or more cycles latency..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Ports_Utilized_1:
    """TMA level-4 node: fraction of cycles with exactly 1 uop executed across all ports. See `desc`."""
    name = "Ports_Utilized_1"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Cycles_1_Port_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_1 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the CPU
executed total of 1 uop per cycle on all execution ports
(Logical Processor cycles since ICL, Physical Core cycles
otherwise). This can be due to heavy data-dependency among
software instructions; or over oversubscribing a particular
hardware resource. In some other cases with high
1_Port_Utilized and L1_Bound; this metric can point to L1
data-cache latency bottleneck that may not necessarily
manifest with complete execution starvation (due to the
short L1 latency e.g. walking a linked list) - looking at
the assembly can be helpful."""
class Ports_Utilized_2:
    """TMA level-4 node: fraction of cycles with exactly 2 uops executed across all ports. See `desc`."""
    name = "Ports_Utilized_2"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Cycles_2_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_2 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total
of 2 uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise).
Loop Vectorization -most compilers feature auto-
Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
class Ports_Utilized_3m:
    """TMA level-4 node: fraction of cycles with 3 or more uops executed across all ports. See `desc`."""
    name = "Ports_Utilized_3m"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Cycles_3m_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.4) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_3m zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total
of 3 or more uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise)."""
class ALU_Op_Utilization:
    """TMA level-5 node: average utilization of the four ALU-capable dispatch ports (0, 1, 5, 6). See `desc`."""
    name = "ALU_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        # Divisor 4 * CORE_CLKS normalizes over the four ALU ports; threshold
        # is not gated on the parent node (as generated).
        try:
            self.val = (EV("UOPS_DISPATCHED_PORT.PORT_0", 5) + EV("UOPS_DISPATCHED_PORT.PORT_1", 5) + EV("UOPS_DISPATCHED_PORT.PORT_5", 5) + EV("UOPS_DISPATCHED_PORT.PORT_6", 5)) / (4 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.4)
        except ZeroDivisionError:
            handle_error(self, "ALU_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution ports for ALU operations."""
class Port_0:
    """TMA level-6 node: utilization of dispatch port 0 (ALU and 2nd branch). See `desc`."""
    name = "Port_0"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_0']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_0", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_0 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 0  ALU and 2nd branch"""
class Port_1:
    """TMA level-6 node: utilization of dispatch port 1 (ALU). See `desc`."""
    name = "Port_1"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_1']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_1", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_1 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 1 (ALU)"""
class Port_5:
    """TMA level-6 node: utilization of dispatch port 5 (ALU). See `desc`."""
    name = "Port_5"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_5']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_5", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_5 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 5  ALU. See section
'Handling Port 5 Pressure' in Optimization Manual:.
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Port_6:
    """TMA level-6 node: utilization of dispatch port 6 (primary branch and simple ALU). See `desc`."""
    name = "Port_6"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_6']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_6", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_6 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 6 Primary Branch and
simple ALU"""
class Load_Op_Utilization:
    """TMA level-5 node: utilization of the load-capable dispatch ports. See `desc`."""
    name = "Load_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        # Ports 2/3/7 handle loads and store-address uops; subtracting port 4
        # (store-data) removes the store-address share, per the TMA model.
        try:
            self.val = (EV("UOPS_DISPATCHED_PORT.PORT_2", 5) + EV("UOPS_DISPATCHED_PORT.PORT_3", 5) + EV("UOPS_DISPATCHED_PORT.PORT_7", 5) - EV("UOPS_DISPATCHED_PORT.PORT_4", 5)) / (2 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Load_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Load operations"""
class Port_2:
    """TMA level-6 node: utilization of dispatch port 2 (loads and store-address). See `desc`."""
    name = "Port_2"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_2']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_2", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_2 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 2 Loads and Store-address"""
class Port_3:
    """TMA level-6 node: utilization of dispatch port 3 (loads and store-address). See `desc`."""
    name = "Port_3"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_3", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_3 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 3 Loads and Store-address"""
class Store_Op_Utilization:
    # Auto-generated TMA tree node: store-port utilization, measured via
    # port-4 (store-data) dispatches per core clock.
    name = "Store_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 5) / CORE_CLKS(self, EV, 5)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Store_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Store operations"""
class Port_4:
    # Auto-generated TMA tree node: port-4 (store-data) dispatch utilization
    # per core clock; flagged when above 60%.
    name = "Port_4"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_4']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_4 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 4 (Store-data)"""
class Port_7:
    # Auto-generated TMA tree node: port-7 (simple store-address) dispatch
    # utilization per core clock; flagged when above 60%.
    name = "Port_7"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_7']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_7", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_7 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 7 simple Store-address"""
class Retiring:
    # Top-level (level-1) TMA category: slots spent retiring useful uops.
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.RETIRE_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        # Retired slots over total issue-pipeline slots; also flagged when the
        # Heavy_Operations child node (wired up by the framework) fires.
        try:
            self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """
This category represents fraction of slots utilized by
useful work i.e. issued uops that eventually get retired.
Ideally; all pipeline slots would be attributed to the
Retiring category. Retiring of 100% would indicate the
maximum Pipeline_Width throughput was achieved. Maximizing
Retiring typically increases the Instructions-per-cycle (see
IPC metric). Note that a high Retiring value does not
necessary mean there is no room for more performance. For
example; Heavy-operations or Microcode Assists are
categorized under Retiring. They often indicate suboptimal
performance and can often be optimized or avoided. . A high
Retiring value for non-vectorized code may be a good hint
for programmer to consider vectorizing his code. Doing so
essentially lets more computations be done without
significantly increasing number of instructions thus
improving the performance."""
class Light_Operations:
    # Level-2 TMA node: Retiring minus Heavy_Operations, i.e. slots retiring
    # single-uop ("light") instructions.
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Computed as the difference of two sibling nodes (attributes wired
        # up by the framework), not from a dedicated event.
        try:
            self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring light-weight operations -- instructions that
require no more than one uop (micro-operation). This
correlates with total number of instructions used by the
program. A uops-per-instruction (see UopPI metric) ratio of
1 or less should be expected for decently optimized code
running on Intel Core/Xeon products. While this often
indicates efficient X86 instructions were executed; high
value does not necessarily mean better performance cannot be
achieved. . Focus on techniques that reduce instruction
count or result in more efficient instructions generation
such as vectorization."""
class FP_Arith:
    # Level-3 TMA node: sum of the X87 / FP-scalar / FP-vector child nodes.
    # Per desc, may exceed its parent (Uops domain, FMA double counting).
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        # Threshold gated on the parent node's threshold (set by framework).
        try:
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """
This metric represents overall arithmetic floating-point
(FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use of
\"Uops\" CountDomain and FMA double-counting."""
class X87_Use:
    # Level-4 TMA node: approximate share of retired uops due to legacy x87.
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        # x87 retired instructions scaled by uops-per-instruction, normalized
        # to retired slots; gated on the parent's threshold.
        try:
            self.val = EV("INST_RETIRED.X87", 4) * UopPI(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """
This metric serves as an approximation of legacy x87 usage.
It accounts for instructions beyond X87 FP arithmetic
operations; hence may be used as a thermometer to avoid X87
high usage and preferably upgrade to modern ISA. See Tip
under Tuning Hint.. Tip: consider compiler flags to generate
newer AVX (or SSE) instruction sets; which typically perform
better and feature vectors."""
class FP_Scalar:
    # Level-4 TMA node: scalar FP arithmetic uops as a fraction of retired
    # slots; gated on the parent's threshold.
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
scalar uops fraction the CPU has retired. May overcount due
to FMA double counting.. Investigate what limits (compiler)
generation of vector code."""
class FP_Vector:
    # Level-4 TMA node: vector FP arithmetic uops (all widths) as a fraction
    # of retired slots; capped at 1.0, gated on the parent's threshold.
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
vector uops fraction the CPU has retired aggregated across
all vector widths. May overcount due to FMA double
counting.. Check if vector width is expected"""
class FP_Vector_128b:
    # Level-5 TMA node: 128-bit packed FP (double + single) uops as a
    # fraction of retired slots; capped at 1.0, gated on the parent.
    name = "FP_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_128b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 128-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class FP_Vector_256b:
    # Level-5 TMA node: 256-bit packed FP (double + single) uops as a
    # fraction of retired slots; capped at 1.0, gated on the parent.
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class Heavy_Operations:
    # Level-2 TMA node. On this model it is equal to the Microcode_Sequencer
    # child node's value (no separate heavy-op event is used).
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Microcode_Sequencer.compute(EV)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that
require two or more uops or micro-coded sequences. This
highly-correlates with the uop length of these
instructions/sequences."""
class Microcode_Sequencer:
    # Level-3 TMA node: slots retiring uops fetched by the MS unit.
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = ['IDQ.MS_UOPS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None
    def compute(self, EV):
        # MS-delivered uops scaled by the retire fraction, normalized to
        # total slots; gated on the parent's threshold.
        try:
            self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was
retiring uops fetched by the Microcode Sequencer (MS) unit.
The MS is used for CISC instructions not supported by the
default decoders (like repeat move strings; or CPUID); or by
microcode assists used to address some operation modes (like
in Floating Point assists). These cases can often be
avoided.."""
class Assists:
    # Level-4 TMA node (estimated): slots lost to microcode assists,
    # modeled as assist count times a fixed average cost.
    name = "Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 4
    htoff = False
    sample = ['OTHER_ASSISTS.ANY_WB_ASSIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO'])
    maxval = 1.0
    def compute(self, EV):
        # Avg_Assist_Cost is a model constant defined elsewhere in this file.
        try:
            self.val = Avg_Assist_Cost * EV("OTHER_ASSISTS.ANY_WB_ASSIST", 4) / SLOTS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops
delivered by the Microcode_Sequencer as a result of Assists.
Assists are long sequences of uops that are required in
certain corner-cases for operations that cannot be handled
natively by the execution pipeline. For example; when
working with very small floating point values (so-called
Denormals); the FP units are not set up to perform these
operations natively. Instead; a sequence of instructions to
perform the computation on the Denormals is injected into
the pipeline. Since these microcode sequences might be
dozens of uops long; Assists can be extremely deleterious to
performance and they can be avoided in many cases."""
class CISC:
    # Level-4 TMA node: MS slots not explained by assists; max() clamps the
    # residual at zero so estimation error cannot go negative.
    name = "CISC"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "CISC zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU retired
uops originated from CISC (complex instruction set computer)
instruction. A CISC instruction has multiple uops that are
required to perform the instruction's functionality as in
the case of read-modify-write as an example. Since these
instructions require multiple uops they may or may not imply
sub-optimal use of machine resources."""
class Metric_IPC:
    # Info metric (not a tree node): Instructions Per Cycle per logical CPU.
    # Note: Metric_* compute() methods set self.val but return nothing.
    name = "IPC"
    domain = "Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Ret', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IPC zero division")
    desc = """
Instructions Per Cycle (per Logical Processor)"""
class Metric_UopPI:
    # Info metric: uops per instruction; flagged when above 1.05.
    name = "UopPI"
    domain = "Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UopPI(self, EV, 0)
            self.thresh = (self.val > 1.05)
        except ZeroDivisionError:
            handle_error_metric(self, "UopPI zero division")
    desc = """
Uops Per Instruction"""
class Metric_UpTB:
    # Info metric: uops per taken branch; flagged when below 1.5x the
    # pipeline width (front-end bandwidth concern).
    name = "UpTB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 1.5
        except ZeroDivisionError:
            handle_error_metric(self, "UpTB zero division")
    desc = """
Uops per taken branch"""
class Metric_CPI:
    # Info metric: Cycles Per Instruction; always displayed (thresh True).
    name = "CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPI zero division")
    desc = """
Cycles Per Instruction (per Logical Processor)"""
class Metric_CLKS:
    # Info count: per-logical-processor active clocks.
    name = "CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CLKS zero division")
    desc = """
Per-Logical Processor actual clocks when the Logical
Processor is active."""
class Metric_SLOTS:
    # Info count: total issue-pipeline slots (TMA denominator).
    name = "SLOTS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SLOTS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SLOTS zero division")
    desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""
class Metric_Execute_per_Issue:
    # Info metric: ratio of executed uops to issued uops.
    name = "Execute_per_Issue"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Cor', 'Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute_per_Issue(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute_per_Issue zero division")
    desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests
high rate of uop micro-fusions. Ratio < 1 suggest high rate
of \"execute\" at rename stage."""
class Metric_CoreIPC:
    # Info metric: IPC aggregated across hyper-threads of a physical core.
    name = "CoreIPC"
    domain = "Core_Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CoreIPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CoreIPC zero division")
    desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""
class Metric_FLOPc:
    # Info metric: floating-point operations per core cycle.
    name = "FLOPc"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'Flops'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FLOPc(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FLOPc zero division")
    desc = """
Floating Point Operations Per Cycle"""
class Metric_FP_Arith_Utilization:
    # Info metric: per-core FP execution-unit usage; can exceed 1 (FMA
    # double counting, per desc).
    name = "FP_Arith_Utilization"
    domain = "Core_Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FP_Arith_Utilization zero division")
    desc = """
Actual per-core usage of the Floating Point non-X87
execution units (regardless of precision or vector-width).
Values > 1 are possible due to Fused-Multiply Add FMA
counting - common."""
class Metric_ILP:
    # Info metric: average uops executed per cycle-with-execution; capped
    # by the machine's execution port count (Exe_Ports constant).
    name = "ILP"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = ILP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "ILP zero division")
    desc = """
Instruction-Level-Parallelism (average number of uops
executed when there is execution) per thread (logical-
processor)"""
class Metric_CORE_CLKS:
    # Info count: core clocks while any logical processor is active.
    name = "CORE_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CORE_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CORE_CLKS zero division")
    desc = """
Core actual clocks when any Logical Processor is active on
the Physical Core"""
class Metric_IpLoad:
    # Info metric: instructions per load; low value = frequent loads.
    name = "IpLoad"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpLoad(self, EV, 0)
            self.thresh = (self.val < 3)
        except ZeroDivisionError:
            handle_error_metric(self, "IpLoad zero division")
    desc = """
Instructions per Load (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
class Metric_IpStore:
    # Info metric: instructions per store; low value = frequent stores.
    name = "IpStore"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpStore(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpStore zero division")
    desc = """
Instructions per Store (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
class Metric_IpBranch:
    # Info metric: instructions per branch; low value = branchy code.
    name = "IpBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpBranch(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpBranch zero division")
    desc = """
Instructions per Branch (lower number means higher
occurrence rate)"""
class Metric_IpCall:
    # Info metric: instructions per near call; flagged under 200.
    name = "IpCall"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpCall(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpCall zero division")
    desc = """
Instructions per (near) call (lower number means higher
occurrence rate)"""
class Metric_IpTB:
    # Info metric: instructions per taken branch; flagged below
    # (2 * pipeline width + 1) — a fetch-bandwidth concern.
    name = "IpTB"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 2 + 1
        except ZeroDivisionError:
            handle_error_metric(self, "IpTB zero division")
    desc = """
Instructions per taken branch"""
class Metric_BpTkBranch:
    # Info metric: branch instructions per taken branch.
    name = "BpTkBranch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = BpTkBranch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "BpTkBranch zero division")
    desc = """
Branch instructions per taken branch. . Can be used to
approximate PGO-likelihood for non-loopy codes."""
class Metric_IpFLOP:
    # Info metric: instructions per FP operation; flagged under 10.
    name = "IpFLOP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpFLOP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFLOP zero division")
    desc = """
Instructions per Floating Point (FP) Operation (lower number
means higher occurrence rate). Reference: Tuning Performance
via Metrics with Expectations.
https://doi.org/10.1109/LCA.2019.2916408"""
class Metric_IpArith:
    # Info metric: instructions per FP arithmetic instruction.
    name = "IpArith"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith zero division")
    desc = """
Instructions per FP Arithmetic instruction (lower number
means higher occurrence rate). Values < 1 are possible due
to intentional FMA double counting. Approximated prior to
BDW."""
class Metric_IpArith_Scalar_SP:
    # Info metric: instructions per scalar single-precision FP instruction.
    name = "IpArith_Scalar_SP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_SP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_SP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Single-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
class Metric_IpArith_Scalar_DP:
    # Info metric: instructions per scalar double-precision FP instruction.
    name = "IpArith_Scalar_DP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_DP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_DP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Double-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
class Metric_IpArith_AVX128:
    # Info metric: instructions per 128-bit AVX/SSE FP instruction.
    name = "IpArith_AVX128"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX128(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX128 zero division")
    desc = """
Instructions per FP Arithmetic AVX/SSE 128-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
class Metric_IpArith_AVX256:
    # Info metric: instructions per 256-bit AVX FP instruction.
    name = "IpArith_AVX256"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX256(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX256 zero division")
    desc = """
Instructions per FP Arithmetic AVX* 256-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
class Metric_Instructions:
    # Info count: total retired instructions.
    name = "Instructions"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Summary', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instructions(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Instructions zero division")
    desc = """
Total number of retired Instructions"""
class Metric_Retire:
    # Info metric: average uops retired per cycle with retirement activity.
    name = "Retire"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Pipeline', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Retire(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Retire zero division")
    desc = """
Average number of Uops retired in cycles where at least one
uop has retired."""
class Metric_Execute:
    # Info metric: execution-density value from the Execute() helper;
    # desc is empty in the generated model.
    name = "Execute"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute zero division")
    desc = """
"""
class Metric_DSB_Coverage:
    # Info metric: fraction of uops delivered by the DSB (uop cache);
    # flagged only when coverage is low AND the workload has high IPC.
    name = "DSB_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSB', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Coverage(self, EV, 0)
            self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Coverage zero division")
    desc = """
Fraction of Uops delivered by the DSB (aka Decoded ICache;
or Uop Cache). See section 'Decoded ICache' in Optimization
Manual. http://www.intel.com/content/www/us/en/architecture-
and-technology/64-ia-32-architectures-optimization-
manual.html"""
class Metric_IpUnknown_Branch:
    # Info metric: instructions per BAClear (unknown-branch) event.
    name = "IpUnknown_Branch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpUnknown_Branch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IpUnknown_Branch zero division")
    desc = """
Instructions per speculative Unknown Branch Misprediction
(BAClear) (lower number means higher occurrence rate)"""
class Metric_IpMispredict:
    # Info metric: instructions per retired branch misprediction;
    # flagged under 200 (frequent mispredicts).
    name = "IpMispredict"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMispredict(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMispredict zero division")
    desc = """
Number of Instructions per non-speculative Branch
Misprediction (JEClear) (lower number means higher
occurrence rate)"""
class Metric_IpMisp_Indirect:
    # Info metric: instructions per mispredicted indirect CALL/JMP;
    # flagged under 1000.
    name = "IpMisp_Indirect"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMisp_Indirect(self, EV, 0)
            self.thresh = (self.val < 1000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMisp_Indirect zero division")
    desc = """
Instructions per retired Mispredicts for indirect CALL or
JMP branches (lower number means higher occurrence rate)."""
class Metric_Load_Miss_Real_Latency:
    # Info latency metric: average core cycles per L1D-miss demand load.
    name = "Load_Miss_Real_Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_Miss_Real_Latency zero division")
    desc = """
Actual Average Latency for L1 data-cache miss demand load
operations (in core cycles)"""
class Metric_MLP:
    # Info metric: memory-level parallelism — average outstanding L1-miss
    # demand loads while at least one such miss is in flight.
    name = "MLP"
    domain = "Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = MLP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MLP zero division")
    desc = """
Memory-Level-Parallelism (average number of L1 miss demand
load when there is at least one such miss. Per-Logical
Processor)"""
class Metric_L1MPKI:
    # Info metric: L1 true misses per kilo instruction (retired demand loads).
    name = "L1MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1MPKI zero division")
    desc = """
L1 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L2MPKI:
    # Info metric: L2 true misses per kilo instruction (retired demand loads).
    name = "L2MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'Backend', 'CacheHits'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI zero division")
    desc = """
L2 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L2MPKI_All:
    # Info metric: L2 misses per kilo instruction, all request types
    # (includes speculative requests).
    name = "L2MPKI_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_All zero division")
    desc = """
L2 cache misses per kilo instruction for all request types
(including speculative)"""
class Metric_L2MPKI_Load:
    # Info metric: L2 misses per kilo instruction for all demand loads
    # (includes speculative loads).
    name = "L2MPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_Load zero division")
    desc = """
L2 cache misses per kilo instruction for all demand loads
(including speculative)"""
class Metric_L2MPKI_RFO:
    # Info metric: offcore requests (L2 misses) per kilo instruction for
    # demand RFOs (read-for-ownership stores).
    name = "L2MPKI_RFO"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheMisses', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_RFO(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_RFO zero division")
    desc = """
Offcore requests (L2 cache miss) per kilo instruction for
demand RFOs"""
class Metric_L2HPKI_All:
    # Info metric: L2 hits per kilo instruction, all request types
    # (includes speculative requests).
    name = "L2HPKI_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2HPKI_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2HPKI_All zero division")
    desc = """
L2 cache hits per kilo instruction for all request types
(including speculative)"""
class Metric_L2HPKI_Load:
    # Info metric: L2 hits per kilo instruction for all demand loads
    # (includes speculative loads).
    name = "L2HPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2HPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2HPKI_Load zero division")
    desc = """
L2 cache hits per kilo instruction for all demand loads
(including speculative)"""
class Metric_L3MPKI:
    # Info metric: L3 true misses per kilo instruction (retired demand loads).
    name = "L3MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3MPKI zero division")
    desc = """
L3 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L1D_Cache_Fill_BW:
    # Info metric: L1D fill bandwidth (per logical processor); desc is
    # empty in the generated model.
    name = "L1D_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1D_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1D_Cache_Fill_BW zero division")
    desc = """
"""
class Metric_L2_Cache_Fill_BW:
    # Info metric: L2 fill bandwidth (per logical processor); desc is
    # empty in the generated model.
    name = "L2_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2_Cache_Fill_BW zero division")
    desc = """
"""
class Metric_L3_Cache_Fill_BW:
    # Info metric: L3 fill bandwidth (per logical processor); desc is
    # empty in the generated model.
    name = "L3_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Fill_BW zero division")
    desc = """
"""
class Metric_Page_Walks_Utilization:
    # Info metric: page-walker utilization for STLB misses; flagged above 50%.
    name = "Page_Walks_Utilization"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Memory.TLB"
    metricgroup = frozenset(['Mem', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Page_Walks_Utilization(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Page_Walks_Utilization zero division")
    desc = """
Utilization of the core's Page Walker(s) serving STLB misses
triggered by instruction/Load/Store accesses"""
class Metric_L1D_Cache_Fill_BW_2T:
    """Average per-core data fill bandwidth to the L1 data cache [GB/sec]."""
    name = "L1D_Cache_Fill_BW_2T"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Core"
    metricgroup = frozenset({'Mem', 'MemoryBW'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = L1D_Cache_Fill_BW_2T(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division")
        else:
            self.thresh = True
    desc = """
Average per-core data fill bandwidth to the L1 data cache
[GB / sec]"""
class Metric_L2_Cache_Fill_BW_2T:
    """Average per-core data fill bandwidth to the L2 cache [GB/sec]."""
    name = "L2_Cache_Fill_BW_2T"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Core"
    metricgroup = frozenset({'Mem', 'MemoryBW'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = L2_Cache_Fill_BW_2T(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division")
        else:
            self.thresh = True
    desc = """
Average per-core data fill bandwidth to the L2 cache [GB /
sec]"""
class Metric_L3_Cache_Fill_BW_2T:
    """Average per-core data fill bandwidth to the L3 cache [GB/sec]."""
    name = "L3_Cache_Fill_BW_2T"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Core"
    metricgroup = frozenset({'Mem', 'MemoryBW'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = L3_Cache_Fill_BW_2T(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division")
        else:
            self.thresh = True
    desc = """
Average per-core data fill bandwidth to the L3 cache [GB /
sec]"""
class Metric_Load_L2_Miss_Latency:
    """Average latency for L2 cache miss demand loads."""
    name = "Load_L2_Miss_Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    area = "Info.Memory.Latency"
    metricgroup = frozenset({'Memory_Lat', 'Offcore'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = Load_L2_Miss_Latency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Load_L2_Miss_Latency zero division")
        else:
            self.thresh = True
    desc = """
Average Latency for L2 cache miss demand Loads"""
class Metric_Load_L2_MLP:
    """Average parallel L2 cache miss demand loads."""
    name = "Load_L2_MLP"
    domain = "Metric"
    maxval = 100
    errcount = 0
    area = "Info.Memory.Latency"
    metricgroup = frozenset({'Memory_BW', 'Offcore'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = Load_L2_MLP(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Load_L2_MLP zero division")
        else:
            self.thresh = True
    desc = """
Average Parallel L2 cache miss demand Loads"""
class Metric_Data_L2_MLP:
    """Average parallel L2 cache miss data reads."""
    name = "Data_L2_MLP"
    domain = "Metric"
    maxval = 100
    errcount = 0
    area = "Info.Memory.Latency"
    metricgroup = frozenset({'Memory_BW', 'Offcore'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = Data_L2_MLP(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Data_L2_MLP zero division")
        else:
            self.thresh = True
    desc = """
Average Parallel L2 cache miss data reads"""
class Metric_CPU_Utilization:
    """Average CPU utilization (percentage)."""
    name = "CPU_Utilization"
    domain = "Metric"
    maxval = 1
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'HPC', 'Summary'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = CPU_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "CPU_Utilization zero division")
        else:
            self.thresh = True
    desc = """
Average CPU Utilization (percentage)"""
class Metric_CPUs_Utilized:
    """Average number of utilized CPUs."""
    name = "CPUs_Utilized"
    domain = "Metric"
    maxval = 300
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'Summary'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = CPUs_Utilized(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "CPUs_Utilized zero division")
        else:
            self.thresh = True
    desc = """
Average number of utilized CPUs"""
class Metric_Core_Frequency:
    """Measured average core frequency for unhalted processors [GHz]."""
    name = "Core_Frequency"
    domain = "SystemMetric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'Summary', 'Power'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = Core_Frequency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Frequency zero division")
        else:
            self.thresh = True
    desc = """
Measured Average Core Frequency for unhalted processors
[GHz]"""
class Metric_Uncore_Frequency:
    """Measured average uncore frequency for the SoC [GHz]."""
    name = "Uncore_Frequency"
    domain = "SystemMetric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'SoC'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = Uncore_Frequency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Uncore_Frequency zero division")
        else:
            self.thresh = True
    desc = """
Measured Average Uncore Frequency for the SoC [GHz]"""
class Metric_GFLOPs:
    """Giga floating point operations per second, aggregated over all FP flavors."""
    name = "GFLOPs"
    domain = "Metric"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'Cor', 'Flops', 'HPC'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = GFLOPs(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "GFLOPs zero division")
        else:
            self.thresh = True
    desc = """
Giga Floating Point Operations Per Second. Aggregate across
all supported options of: FP precisions, scalar and vector
instructions, vector-width"""
class Metric_Turbo_Utilization:
    """Average frequency utilization relative to nominal frequency."""
    name = "Turbo_Utilization"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'Power'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = Turbo_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Turbo_Utilization zero division")
        else:
            self.thresh = True
    desc = """
Average Frequency Utilization relative nominal frequency"""
class Metric_SMT_2T_Utilization:
    """Fraction of cycles where both hardware logical processors were active."""
    name = "SMT_2T_Utilization"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'SMT'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = SMT_2T_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "SMT_2T_Utilization zero division")
        else:
            self.thresh = True
    desc = """
Fraction of cycles where both hardware Logical Processors
were active"""
class Metric_Kernel_Utilization:
    """Fraction of cycles spent in OS kernel mode."""
    name = "Kernel_Utilization"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'OS'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = Kernel_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_Utilization zero division")
        else:
            self.thresh = self.val > 0.05
    desc = """
Fraction of cycles spent in the Operating System (OS) Kernel
mode"""
class Metric_Kernel_CPI:
    """Cycles per instruction in OS kernel mode."""
    name = "Kernel_CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'OS'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = Kernel_CPI(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_CPI zero division")
        else:
            self.thresh = True
    desc = """
Cycles Per Instruction for the Operating System (OS) Kernel
mode"""
class Metric_DRAM_BW_Use:
    """Average external memory bandwidth use for reads and writes [GB/sec]."""
    name = "DRAM_BW_Use"
    domain = "GB/sec"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'HPC', 'MemOffcore', 'MemoryBW', 'SoC'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = DRAM_BW_Use(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "DRAM_BW_Use zero division")
        else:
            self.thresh = True
    desc = """
Average external Memory Bandwidth Use for reads and writes
[GB / sec]"""
class Metric_MEM_Read_Latency:
    """Average latency of data reads from external memory, in nanoseconds."""
    name = "MEM_Read_Latency"
    domain = "NanoSeconds"
    maxval = 1000
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'Mem', 'MemoryLat', 'SoC'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = MEM_Read_Latency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_Read_Latency zero division")
        else:
            self.thresh = True
    desc = """
Average latency of data read request to external memory (in
nanoseconds). Accounts for demand loads and L1/L2
prefetches."""
class Metric_MEM_Parallel_Reads:
    """Average number of parallel data read requests to external memory."""
    name = "MEM_Parallel_Reads"
    domain = "SystemMetric"
    maxval = 100
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'Mem', 'MemoryBW', 'SoC'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = MEM_Parallel_Reads(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_Parallel_Reads zero division")
        else:
            self.thresh = True
    desc = """
Average number of parallel data read requests to external
memory. Accounts for demand loads and L1/L2 prefetches"""
class Metric_Time:
    """Run duration time in seconds."""
    name = "Time"
    domain = "Seconds"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'Summary'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = Time(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Time zero division")
        else:
            self.thresh = self.val < 1
    desc = """
Run duration time in seconds"""
class Metric_Socket_CLKS:
    """Socket actual clocks when any core is active on that socket."""
    name = "Socket_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'SoC'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = Socket_CLKS(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Socket_CLKS zero division")
        else:
            self.thresh = True
    desc = """
Socket actual clocks when any core is active on that socket"""
class Metric_IpFarBranch:
    """Instructions per far branch (OS transitions, interrupts, exceptions)."""
    name = "IpFarBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset({'Branches', 'OS'})
    sibling = None

    def compute(self, EV):
        # A missing event surfaces as ZeroDivisionError in the formula.
        try:
            self.val = IpFarBranch(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFarBranch zero division")
        else:
            self.thresh = self.val < 1000000
    desc = """
Instructions per Far Branch ( Far Branches apply upon
transition from application to operating system, handling
interrupts, exceptions) [lower number means higher
occurrence rate]"""
# Schedule
class Setup:
    """Build the complete topdown node/metric hierarchy for runner *r*.

    Instantiates every tree node and user-visible metric, registers it
    with the runner, then wires up parent links, the cross-tree
    references that node formulas dereference, and sibling hints used
    for bottleneck reporting.  Generated code: keep edits mechanical.
    """
    def __init__(self, r):
        o = dict()
        # Tree nodes: instantiate, register with the runner, index by name.
        n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n
        n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n
        n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n
        n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n
        n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n
        n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n
        n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n
        n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n
        n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n
        n = LCP() ; r.run(n) ; o["LCP"] = n
        n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n
        n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n
        n = MITE() ; r.run(n) ; o["MITE"] = n
        n = DSB() ; r.run(n) ; o["DSB"] = n
        n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n
        n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n
        n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n
        n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n
        n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n
        n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n
        n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n
        n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n
        n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n
        n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n
        n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n
        n = FB_Full() ; r.run(n) ; o["FB_Full"] = n
        n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n
        n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n
        n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n
        n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n
        n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n
        n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n
        n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n
        n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n
        n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n
        n = Local_MEM() ; r.run(n) ; o["Local_MEM"] = n
        n = Remote_MEM() ; r.run(n) ; o["Remote_MEM"] = n
        n = Remote_Cache() ; r.run(n) ; o["Remote_Cache"] = n
        n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n
        n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n
        n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n
        n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n
        n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n
        n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n
        n = Divider() ; r.run(n) ; o["Divider"] = n
        n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n
        n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n
        n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n
        n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n
        n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n
        n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n
        n = Port_0() ; r.run(n) ; o["Port_0"] = n
        n = Port_1() ; r.run(n) ; o["Port_1"] = n
        n = Port_5() ; r.run(n) ; o["Port_5"] = n
        n = Port_6() ; r.run(n) ; o["Port_6"] = n
        n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n
        n = Port_2() ; r.run(n) ; o["Port_2"] = n
        n = Port_3() ; r.run(n) ; o["Port_3"] = n
        n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n
        n = Port_4() ; r.run(n) ; o["Port_4"] = n
        n = Port_7() ; r.run(n) ; o["Port_7"] = n
        n = Retiring() ; r.run(n) ; o["Retiring"] = n
        n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n
        n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n
        n = X87_Use() ; r.run(n) ; o["X87_Use"] = n
        n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n
        n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n
        n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n
        n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n
        n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n
        n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n
        n = Assists() ; r.run(n) ; o["Assists"] = n
        n = CISC() ; r.run(n) ; o["CISC"] = n
        # parents: wire every child node into the topdown tree.
        o["Fetch_Latency"].parent = o["Frontend_Bound"]
        o["ICache_Misses"].parent = o["Fetch_Latency"]
        o["ITLB_Misses"].parent = o["Fetch_Latency"]
        o["Branch_Resteers"].parent = o["Fetch_Latency"]
        o["Mispredicts_Resteers"].parent = o["Branch_Resteers"]
        o["Clears_Resteers"].parent = o["Branch_Resteers"]
        o["Unknown_Branches"].parent = o["Branch_Resteers"]
        o["MS_Switches"].parent = o["Fetch_Latency"]
        o["LCP"].parent = o["Fetch_Latency"]
        o["DSB_Switches"].parent = o["Fetch_Latency"]
        o["Fetch_Bandwidth"].parent = o["Frontend_Bound"]
        o["MITE"].parent = o["Fetch_Bandwidth"]
        o["DSB"].parent = o["Fetch_Bandwidth"]
        o["Branch_Mispredicts"].parent = o["Bad_Speculation"]
        o["Machine_Clears"].parent = o["Bad_Speculation"]
        o["Memory_Bound"].parent = o["Backend_Bound"]
        o["L1_Bound"].parent = o["Memory_Bound"]
        o["DTLB_Load"].parent = o["L1_Bound"]
        o["Store_Fwd_Blk"].parent = o["L1_Bound"]
        o["Lock_Latency"].parent = o["L1_Bound"]
        o["Split_Loads"].parent = o["L1_Bound"]
        o["G4K_Aliasing"].parent = o["L1_Bound"]
        o["FB_Full"].parent = o["L1_Bound"]
        o["L2_Bound"].parent = o["Memory_Bound"]
        o["L3_Bound"].parent = o["Memory_Bound"]
        o["Contested_Accesses"].parent = o["L3_Bound"]
        o["Data_Sharing"].parent = o["L3_Bound"]
        o["L3_Hit_Latency"].parent = o["L3_Bound"]
        o["SQ_Full"].parent = o["L3_Bound"]
        o["DRAM_Bound"].parent = o["Memory_Bound"]
        o["MEM_Bandwidth"].parent = o["DRAM_Bound"]
        o["MEM_Latency"].parent = o["DRAM_Bound"]
        o["Local_MEM"].parent = o["MEM_Latency"]
        o["Remote_MEM"].parent = o["MEM_Latency"]
        o["Remote_Cache"].parent = o["MEM_Latency"]
        o["Store_Bound"].parent = o["Memory_Bound"]
        o["Store_Latency"].parent = o["Store_Bound"]
        o["False_Sharing"].parent = o["Store_Bound"]
        o["Split_Stores"].parent = o["Store_Bound"]
        o["DTLB_Store"].parent = o["Store_Bound"]
        o["Core_Bound"].parent = o["Backend_Bound"]
        o["Divider"].parent = o["Core_Bound"]
        o["Ports_Utilization"].parent = o["Core_Bound"]
        o["Ports_Utilized_0"].parent = o["Ports_Utilization"]
        o["Ports_Utilized_1"].parent = o["Ports_Utilization"]
        o["Ports_Utilized_2"].parent = o["Ports_Utilization"]
        o["Ports_Utilized_3m"].parent = o["Ports_Utilization"]
        o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"]
        o["Port_0"].parent = o["ALU_Op_Utilization"]
        o["Port_1"].parent = o["ALU_Op_Utilization"]
        o["Port_5"].parent = o["ALU_Op_Utilization"]
        o["Port_6"].parent = o["ALU_Op_Utilization"]
        o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"]
        o["Port_2"].parent = o["Load_Op_Utilization"]
        o["Port_3"].parent = o["Load_Op_Utilization"]
        o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"]
        o["Port_4"].parent = o["Store_Op_Utilization"]
        o["Port_7"].parent = o["Store_Op_Utilization"]
        o["Light_Operations"].parent = o["Retiring"]
        o["FP_Arith"].parent = o["Light_Operations"]
        o["X87_Use"].parent = o["FP_Arith"]
        o["FP_Scalar"].parent = o["FP_Arith"]
        o["FP_Vector"].parent = o["FP_Arith"]
        o["FP_Vector_128b"].parent = o["FP_Vector"]
        o["FP_Vector_256b"].parent = o["FP_Vector"]
        o["Heavy_Operations"].parent = o["Retiring"]
        o["Microcode_Sequencer"].parent = o["Heavy_Operations"]
        o["Assists"].parent = o["Microcode_Sequencer"]
        o["CISC"].parent = o["Microcode_Sequencer"]
        # user visible metrics: registered via r.metric(), not r.run().
        n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n
        n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n
        n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n
        n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n
        n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n
        n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n
        n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n
        n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n
        n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n
        n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n
        n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n
        n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n
        n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n
        n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n
        n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n
        n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n
        n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n
        n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n
        n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n
        n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n
        n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n
        n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n
        n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n
        n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n
        n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n
        n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n
        n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n
        n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n
        n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n
        n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n
        n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n
        n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n
        n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n
        n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n
        n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n
        n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n
        n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n
        n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n
        n = Metric_L2HPKI_All() ; r.metric(n) ; o["L2HPKI_All"] = n
        n = Metric_L2HPKI_Load() ; r.metric(n) ; o["L2HPKI_Load"] = n
        n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n
        n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n
        n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n
        n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n
        n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n
        n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n
        n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n
        n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n
        n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n
        n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n
        n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n
        n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n
        n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n
        n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n
        n = Metric_Uncore_Frequency() ; r.metric(n) ; o["Uncore_Frequency"] = n
        n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n
        n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n
        n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n
        n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n
        n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n
        n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n
        n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n
        n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n
        n = Metric_Time() ; r.metric(n) ; o["Time"] = n
        n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n
        n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n
        # references between groups: attributes dereferenced inside formulas.
        o["Mispredicts_Resteers"].Branch_Resteers = o["Branch_Resteers"]
        o["Clears_Resteers"].Branch_Resteers = o["Branch_Resteers"]
        o["Unknown_Branches"].Clears_Resteers = o["Clears_Resteers"]
        o["Unknown_Branches"].Branch_Resteers = o["Branch_Resteers"]
        o["Unknown_Branches"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
        o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"]
        o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"]
        o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"]
        o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"]
        o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"]
        o["Backend_Bound"].Retiring = o["Retiring"]
        o["Backend_Bound"].Bad_Speculation = o["Bad_Speculation"]
        o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"]
        o["Memory_Bound"].Retiring = o["Retiring"]
        o["Memory_Bound"].Bad_Speculation = o["Bad_Speculation"]
        o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"]
        o["Memory_Bound"].Backend_Bound = o["Backend_Bound"]
        o["Memory_Bound"].Fetch_Latency = o["Fetch_Latency"]
        o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"]
        o["Core_Bound"].Retiring = o["Retiring"]
        o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"]
        o["Core_Bound"].Memory_Bound = o["Memory_Bound"]
        o["Core_Bound"].Backend_Bound = o["Backend_Bound"]
        o["Core_Bound"].Bad_Speculation = o["Bad_Speculation"]
        o["Core_Bound"].Fetch_Latency = o["Fetch_Latency"]
        o["Ports_Utilization"].Fetch_Latency = o["Fetch_Latency"]
        o["Ports_Utilized_0"].Fetch_Latency = o["Fetch_Latency"]
        o["Retiring"].Heavy_Operations = o["Heavy_Operations"]
        o["Light_Operations"].Retiring = o["Retiring"]
        o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"]
        o["Light_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]
        o["FP_Arith"].FP_Scalar = o["FP_Scalar"]
        o["FP_Arith"].X87_Use = o["X87_Use"]
        o["FP_Arith"].FP_Vector = o["FP_Vector"]
        o["Heavy_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]
        o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"]
        o["CISC"].Assists = o["Assists"]
        # siblings cross-tree: related nodes hinted together in reports.
        o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],)
        o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],)
        o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],)
        o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],)
        o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],)
        o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],)
        o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],)
        o["Machine_Clears"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"], o["Microcode_Sequencer"],)
        o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],)
        o["DTLB_Load"].sibling = (o["DTLB_Store"],)
        o["Lock_Latency"].sibling = (o["Store_Latency"],)
        o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"],)
        o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"],)
        o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Remote_Cache"], o["False_Sharing"],)
        o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],)
        o["L3_Hit_Latency"].overlap = True
        o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],)
        o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],)
        o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],)
        o["Remote_Cache"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"],)
        o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],)
        o["Store_Latency"].overlap = True
        o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"],)
        o["Split_Stores"].sibling = (o["Port_4"],)
        o["DTLB_Store"].sibling = (o["DTLB_Load"],)
        o["Ports_Utilized_1"].sibling = (o["L1_Bound"],)
        o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
        o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
        o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
        o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
        o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
        o["Port_4"].sibling = (o["Split_Stores"],)
        o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
        o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
        o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"],)
        o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"],)
        o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],)
        o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
        o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
        o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
| 148,580 | Python | .py | 3,656 | 34.950492 | 320 | 0.656843 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,883 | tl-xlsx.py | andikleen_pmu-tools/tl-xlsx.py | #!/usr/bin/env python3
# Copyright (c) 2020, Intel Corporation
# Author: Andi Kleen
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# convert toplev output to xlsx files using xlsxwriter
# toplev.py --all --valcsv xv.log --perf-output xp.log -A -a --split-output --per-socket --global --summary \
# --per-core --per-thread -x, -o x.log -I 1000 sleep 10
# tl_xlsx.py --valcsv xv.log --perf xp.log --socket x-socket.log --global x-global.log --core x-core.log --thread x-thread.log x.xlsx
from __future__ import print_function
import sys
import argparse
import re
import collections
import csv
try:
import xlsxwriter
except ImportError:
sys.exit("Please install xlswriter with 'pip%s install xlsxwriter'" % sys.version[0])
import gen_level
# Command line: every toplev input is an optional CSV stream; only the
# xlsx output name is mandatory.
ap = argparse.ArgumentParser(description="Convert toplev CSV files to xlsx")
ap.add_argument('xlsxfile', help="xlsx output name")
ap.add_argument('--socket', type=argparse.FileType('r'), help="toplev socket csv file", metavar="csvfile")
ap.add_argument('--global', type=argparse.FileType('r'), help="toplev global csv file", dest='global_', metavar="csvfile")
ap.add_argument('--core', type=argparse.FileType('r'), help="toplev core csv file", metavar="csvfile")
ap.add_argument('--program', type=argparse.FileType('r'), help="toplev program csv file", metavar="csvfile")
ap.add_argument('--thread', type=argparse.FileType('r'), help="toplev thread csv file", metavar="csvfile")
ap.add_argument('--add', nargs=2, help="toplev thread generic csv file. Specify csvfile and sheet name as two arguments",
                metavar="name", action="append")
ap.add_argument('--valcsv', type=argparse.FileType('r'), help="toplev valcsv input file", metavar="csvfile")
ap.add_argument('--perf', type=argparse.FileType('r'), help="toplev perf values csv file")
ap.add_argument('--cpuinfo', type=argparse.FileType('r'), help="cpuinfo file")
ap.add_argument('--chart', help="add sheet with plots of normalized sheet. argument is normalied sheet name",
                action="append")
ap.add_argument('--no-summary', help='Do not generate summary charts', action='store_true')
args = ap.parse_args()
# constant_memory streams rows to disk as they are written, keeping memory
# flat for long toplev runs; it requires strictly row-ordered writes.
workbook = xlsxwriter.Workbook(args.xlsxfile, {'constant_memory': True})
bold = workbook.add_format({'bold': True})
# Numeric display format: thousands separators, one decimal place.
valueformat = workbook.add_format({'num_format': '###,###,###,###,##0.0'})
valueformat_bold = workbook.add_format({'num_format': '###,###,###,###,##0.0',
                                        'bold': True})
#valueformat.set_num_format(1)
def set_columns(worksheet, c, lengths):
    """Widen worksheet columns to fit row *c*, tracking the widest cell seen.

    *lengths* is a defaultdict(int) of column index -> widest width so far;
    the "Value" and "Description" headers use fixed placeholder widths.
    """
    for idx, cell in enumerate(c):
        if cell == "Value":
            cell = " " * 18
        elif cell == "Description":
            cell = "Descr"
        lengths[idx] = max(len(cell) + 5, lengths[idx])
        worksheet.set_column(idx, idx, lengths[idx])
worksheets = {}  # sheet name -> xlsxwriter worksheet object
rows = {}        # sheet name -> saved row position for that sheet
headers = {}     # sheet name -> header info (name->index map; raw title row for summary sheets)
def get_worksheet(name):
    """Return the worksheet called *name*, creating and registering it on first use."""
    try:
        return worksheets[name]
    except KeyError:
        ws = workbook.add_worksheet(name)
        worksheets[name] = ws
        rows[name] = 1   # row 0 is reserved for the header
        return ws
def to_float(n):
    """Convert string *n* to float when it looks numeric, else return it unchanged.

    The pattern deliberately admits thousands separators ("1,000"); they
    are stripped before conversion.  Strings the pattern matches but
    float() still rejects (e.g. "1.2.3") are returned as-is instead of
    crashing — the original code raised ValueError on such cells.
    """
    if re.match(r'-?[,.0-9]+$', n):
        try:
            return float(n.replace(",", ""))
        except ValueError:
            return n
    return n
def create_sheet(name, infh, delimiter=',', version=None):
    """Copy the CSV stream *infh* into a worksheet called *name*.

    Numeric-looking cells are written as numbers, toplev's bottleneck
    marker rows ("<==") get bold highlighting, and an embedded SUMMARY
    section is redirected to a separate "<name> summary" sheet (unless
    --no-summary was given).  Lines whose first field starts with '#'
    are treated as a version banner; the last one seen is returned so
    callers can thread it through multiple sheets.
    """
    lengths = collections.defaultdict(lambda: 0)  # widest cell text per column
    worksheet = get_worksheet(name)
    cf = csv.reader(infh, delimiter=delimiter)
    row = 0
    title = {}       # header name -> column index of this input
    titlerow = []    # raw header row, replayed onto the summary sheet
    summary = False  # True while inside the SUMMARY section
    for c in cf:
        # Version banner ('# ...'): remember it, do not write it out.
        if len(c) > 0 and len(c[0]) > 0 and c[0][0] == "#":
            version = c
            continue
        if row == 0:
            title = collections.OrderedDict()
            for i, k in enumerate(c):
                title[k] = i
            titlerow = c
            headers[name] = title
        # Sample only the first rows for column sizing to keep it cheap.
        if row < 10:
            set_columns(worksheet, c, lengths)
        if not summary and len(c) > 0 and c[0] == "SUMMARY":
            # Entering the SUMMARY block: switch writes to the summary sheet.
            if args.no_summary:
                continue
            rows[name] = row
            sname = name + " summary"
            worksheet = get_worksheet(sname)
            set_columns(worksheet, c, lengths)
            worksheet.write_row(0, 0, titlerow)
            headers[sname] = titlerow
            row = rows[sname]
            summary = True
        elif summary and len(c) > 0 and c[0] != "SUMMARY" and c[0][0] != "#":
            # Left the SUMMARY block: resume writing to the main sheet.
            worksheet = get_worksheet(name)
            summary = False
            rows[sname] = row
            row = rows[name]
        c = list(map(to_float, c))
        worksheet.write_row(row, 0, c)
        isbn = False
        if "Bottleneck" in title:
            bn = title["Bottleneck"]
            if len(c) > bn and c[bn] == "<==":
                # Highlighted bottleneck row: rewrite the key cells in bold.
                worksheet.write(row, title["Area"], c[title["Area"]], bold)
                worksheet.write(row, title["Value"], c[title["Value"]], bold)
                if "CPUs" in title:
                    worksheet.write(row, title["CPUs"], c[title["CPUs"]], bold)
                isbn = True
                worksheet.write(row, bn, c[bn], bold)
        if "Value" in title and len(c) > title["Value"] and isinstance(c[title["Value"]], float):
            worksheet.write_number(row, title["Value"], c[title["Value"]],
                                   valueformat_bold if isbn else valueformat)
        elif "0" in title:
            # Per-CPU sheets have numbered string column headers ("0", "1", ...).
            # BUGFIX: the original tested `num in title` with an int, which can
            # never match the string keys, leaving this loop dead; compare the
            # string form.  to_float already converted numeric cells, so check
            # the type instead of re-matching digits (which would TypeError on
            # a float).
            num = 0
            while "%d" % num in title:
                col = title["%d" % num]
                if len(c) > col and isinstance(c[col], float):
                    worksheet.write_number(row, col, c[col], valueformat)
                num += 1
        row += 1
    return version
ROW_SCALE = 18   # chart width per data row (xlsxwriter set_size width units — presumably pixels; confirm against docs)
MIN_ROWS = 15    # width floor added so very short runs still get a readable chart
GRAPH_ROWS = 15  # vertical spacing between stacked charts, in worksheet rows
def gen_chart(source):
    """Create a "<source> chart" worksheet with charts over the data in sheet `source`.

    Columns classified as metrics (gen_level.is_metric) each get their own
    line chart; other columns are grouped into percent-stacked column charts
    keyed by their name prefix / dotted hierarchy level.
    """
    if source not in headers:
        print("source %s for chart not found" % source, file=sys.stderr)
        return
    worksheet = get_worksheet(source + " chart")
    charts = collections.OrderedDict()
    for n, ind in headers[source].items():
        if n == "Timestamp":
            continue
        # split an optional one-word prefix off the column name
        ns = n.split()
        if len(ns) > 1:
            prefix = ns[0] + " "
            nn = " ".join(ns[1:])
        else:
            prefix = ''
            nn = n
        if gen_level.is_metric(nn):
            # metrics get an individual line chart
            chart = workbook.add_chart({'type': 'line'})
            charts[n] = chart
            chart.set_title({'name': n})
        else:
            # group hierarchical (dotted) names into one stacked chart per parent
            key = n[:n.rindex('.')] if '.' in n else prefix
            if key not in charts:
                charts[key] = workbook.add_chart(
                    {'type': 'column', 'subtype': 'percent_stacked' })
            chart = charts[key]
            chart.set_title({
                'name': '%s Level %d %s' % (prefix, n.count('.') + 1,
                    key[key.index(' '):] if ' ' in key else key) })
        chart.set_x_axis({'name': 'Timestamp'})
        # series data comes from the source sheet: column 0 holds categories
        chart.add_series({
            'name': [source, 0, ind],
            'categories': [source, 1, 0, rows[source], 0],
            'values': [source, 1, ind, rows[source], ind]
        })
        chart.set_size({'width': (rows[source] + MIN_ROWS ) * ROW_SCALE})
        chart.set_legend({
            'overlay': True,
            'layout': { 'x': 0.01, 'y': 0.01, 'width': 0.12, 'height': 0.12 },
            'fill': { 'none': True, 'transparency': True }
        })
    # stack all charts vertically in column A
    row = 1
    for j in charts.keys():
        worksheet.insert_chart('A%d' % row, charts[j])
        row += GRAPH_ROWS
# Build one sheet per requested input file.  `version` carries the last
# seen "#..." banner row through all sheets so it can be recorded at the end.
version = None
if args.global_:
    version = create_sheet("global", args.global_, version=version)
if args.socket:
    version = create_sheet("socket", args.socket, version=version)
if args.core:
    version = create_sheet("core", args.core, version=version)
if args.thread:
    version = create_sheet("thread", args.thread, version=version)
if args.program:
    version = create_sheet("prog", args.program, version=version)
if args.add:
    for fn, name in args.add:
        # bug fix: close the extra input files instead of leaking the handles
        with open(fn) as infh:
            version = create_sheet(name, infh, version=version)
if args.chart:
    for cname in args.chart:
        gen_chart(cname)
if args.valcsv:
    create_sheet("event values", args.valcsv)
if args.perf:
    create_sheet("raw perf output", args.perf, delimiter=';')
if args.cpuinfo:
    create_sheet("cpuinfo", args.cpuinfo, delimiter=':')
if version:
    # record the tool version banner on its own sheet
    worksheet = workbook.add_worksheet("version")
    worksheet.write_row(0, 0, version)
workbook.close()
| 8,633 | Python | .py | 207 | 34.120773 | 133 | 0.597741 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,884 | cputop.py | andikleen_pmu-tools/cputop.py | #!/usr/bin/env python3
# query cpu topology and print all matching cpu numbers
# cputop "query" ["format"]
# query is a python expression, using variables:
# socket, core, thread, type, cpu
# or "offline" to query all offline cpus
# or "atom" or "core" to select core types
# type can be "atom" or "core"
# cpu is the cpu number
# format is a printf format with %d
# %d will be replaced with the cpu number
# format can be offline to offline the cpu or online to online
# Author: Andi Kleen
from __future__ import print_function
import sys
import os
import re
import argparse
import glob
def numfile(fn):
    """Read file fn and return its contents parsed as a single integer."""
    # context manager closes the handle even if int() raises
    with open(fn, "r") as f:
        return int(f.read())
outstr = ""
def output(p, fmt):
if fmt:
if fmt == "taskset":
global outstr
if outstr:
outstr += ","
else:
outstr += "taskset -c "
outstr += "%d" % p
else:
print(fmt % (p,))
else:
print(p)
# Command line: a query expression plus an optional output format.
ap = argparse.ArgumentParser(description='''
query cpu topology and print all matching cpu numbers
cputop "query" ["format"]
query is a python expression, using variables:
socket, core, thread, type, cpu
type is "core" or "atom" on a hybrid system
cpu is the cpu number
or "offline" to query all offline cpus
format is a printf format with %d
%d will be replaced with the cpu number, or online/offline
to generate online/offline commands, or taskset to generate taskset command line''',
epilog='''
Examples:
print all cores on socket 0
cputop "socket == 0"
print all first threads in each core on socket 0
cputop "thread == 0 and socket == 0"
disable all second threads (disable hyper threading)
cputop "thread == 1" offline
reenable all offlined cpus
cputop offline online
print all online cpus
cputop True ''', formatter_class=argparse.RawTextHelpFormatter)
ap.add_argument('expr', help='python expression with socket/core/thread')
ap.add_argument('fmt', help='Output format string with %%d, or online/offline', nargs='?')
args = ap.parse_args()
# convenience aliases for hybrid core-type queries
if args.expr == "atom":
    args.expr = 'type == "atom"'
if args.expr == "core":
    args.expr = 'type == "core"'
# shorthand formats that generate sysfs online/offline shell commands
special = {
    "offline": "echo 0 > /sys/devices/system/cpu/cpu%d/online",
    "online": "echo 1 > /sys/devices/system/cpu/cpu%d/online",
}
if args.fmt in special:
    args.fmt = special[args.fmt]
# Map cpu number -> core type ("core"/"atom") on hybrid systems, from the
# per-type cpu lists exported in /sys/devices/cpu_*/cpus.
types = dict()
for fn in glob.glob("/sys/devices/cpu_*/cpus"):
    typ = os.path.basename(fn.replace("/cpus", "")).replace("cpu_", "")
    # bug fix: use a context manager instead of leaking the file handle
    with open(fn) as f:
        cpus = f.read()
    for j in cpus.split(","):
        # entries are either a single cpu "N" or a range "N-M"
        m = re.match(r'(\d+)(-\d+)?', j)
        if m is None:
            continue
        if m.group(2):
            for k in range(int(m.group(1)), int(m.group(2)[1:]) + 1):
                types[k] = typ
        else:
            types[int(m.group(1))] = typ
base = "/sys/devices/system/cpu/"
p = {}
l = os.listdir(base)
for d in l:
m = re.match(r"cpu([0-9]+)", d)
if not m:
continue
proc = int(m.group(1))
top = base + d + "/topology"
if not os.path.exists(top):
if args.expr == "offline":
output(proc, args.fmt)
continue
socket = numfile(top + "/physical_package_id")
core = numfile(top + "/core_id")
n = 0
while (socket, core, n) in p:
n += 1
p[(socket, core, n)] = proc
if args.expr == "offline":
sys.exit(0)
for j in sorted(p.keys()):
socket, core, thread = j
cpu = p[j]
type = "any"
if cpu in types:
type = types[cpu]
if eval(args.expr):
output(p[j], args.fmt)
if outstr:
print(outstr)
| 3,577 | Python | .py | 118 | 25.923729 | 90 | 0.634331 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,885 | spr_server_ratios.py | andikleen_pmu-tools/spr_server_ratios.py | # -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel Xeon Scalable Processors 4th gen (code name Sapphire Rapids)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#
# Helpers
# Defaults for module-level knobs; presumably overridden by the importing
# driver before the formulas run — confirm against the caller.
def print_error(msg):
    """Default error sink: swallow the message (callers may replace this)."""
    return False

smt_enabled = False
ebs_mode = False
version = "4.8-full-perf"
base_frequency = -1.0
Memory = 1
Average_Frequency = 0.0
num_cores = 1
num_threads = 1
num_sockets = 1
topdown_use_fixed = False

def handle_error(obj, msg):
    """Report a computation error on tree node `obj` and zero it out."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
    obj.thresh = False

def handle_error_metric(obj, msg):
    """Report a computation error on metric `obj` and zero its value."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
# Constants
Exe_Ports = 12
Mem_L2_Store_Cost = 10    # cycles charged per L2 store hit (see Store_L2_Hit_Cycles)
Mem_STLB_Hit_Cost = 7
MS_Switches_Cost = 3
Avg_Assist_Cost = ( 99 *3 + 63 + 30 ) / 5    # average over the modeled assist types
Pipeline_Width = 6    # issue slots per cycle; used to normalize IPC/UpTB thresholds
OneMillion = 1000000
OneBillion = 1000000000
Energy_Unit = 61
Errata_Whitelist = "SPR121;SPR103"
# NOTE(review): this overrides the Memory = 1 default assigned above —
# looks like a generator artifact; confirm against the TMA spreadsheet.
Memory = 0
PMM_App_Direct = 1 if Memory == 1 else 0
HBM = 1 if Memory > 1 else 0
PERF_METRICS_MSR = 1
FP16 = 1    # gates use of the FP_ARITH_INST_RETIRED2 half-precision events below
DS = 1      # gates use of the PMM/remote MEM_LOAD_* retirement events below
# Aux. formulas
#
# Conventions for all formulas in this file: EV(name, level) is a callback
# that presumably returns the value of perf event `name` at the given node
# level, and self.<Node>.compute(EV) evaluates a sibling tree node.  The
# code is auto-generated (see file header); formulas mirror the TMA
# spreadsheet and should not be hand-edited.
def Br_DoI_Jumps(self, EV, level):
    return EV("BR_INST_RETIRED.NEAR_TAKEN", level) - EV("BR_INST_RETIRED.COND_TAKEN", level) - 2 * EV("BR_INST_RETIRED.NEAR_CALL", level)
def Branching_Retired(self, EV, level):
    return (EV("BR_INST_RETIRED.ALL_BRANCHES", level) + 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("INST_RETIRED.NOP", level)) / SLOTS(self, EV, level)
def Serialize_Core(self, EV, level):
    return self.Core_Bound.compute(EV) * (self.Serializing_Operation.compute(EV) + EV("RS.EMPTY:u1", level) / CLKS(self, EV, level) * self.Ports_Utilized_0.compute(EV)) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.AMX_Busy.compute(EV) + self.Divider.compute(EV))
def Umisp(self, EV, level):
    return 10 * self.Microcode_Sequencer.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV)
def Assist(self, EV, level):
    return (self.Microcode_Sequencer.compute(EV) / (self.Microcode_Sequencer.compute(EV) + self.Few_Uops_Instructions.compute(EV))) * (self.Assists.compute(EV) / self.Microcode_Sequencer.compute(EV))
def Assist_Frontend(self, EV, level):
    return (1 - EV("INST_RETIRED.REP_ITERATION", level) / EV("UOPS_RETIRED.MS:c1", level)) * (self.Fetch_Latency.compute(EV) * (self.MS_Switches.compute(EV) + self.Branch_Resteers.compute(EV) * (self.Clears_Resteers.compute(EV) + self.Mispredicts_Resteers.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV)) / (self.Clears_Resteers.compute(EV) + self.Unknown_Branches.compute(EV) + self.Mispredicts_Resteers.compute(EV))) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
def Assist_Retired(self, EV, level):
    return Assist(self, EV, level) * self.Heavy_Operations.compute(EV)
def Core_Bound_Cycles(self, EV, level):
    return self.Ports_Utilized_0.compute(EV) * CLKS(self, EV, level) + Few_Uops_Executed_Threshold(self, EV, level)
def DurationTimeInSeconds(self, EV, level):
    return EV("interval-ms", 0) / 1000
def Execute_Cycles(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.THREAD:c1", level)
# factor used for metrics associating fixed costs for FB Hits - according to probability theory if all FB Hits come at a random rate in original L1_Miss cost interval then the average cost for each one is 0.5 of the fixed cost
def FB_Factor(self, EV, level):
    return 1 + FBHit_per_L1Miss(self, EV, level) / 2
def FBHit_per_L1Miss(self, EV, level):
    return EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("MEM_LOAD_RETIRED.L1_MISS", level)
def Fetched_Uops(self, EV, level):
    return EV("UOPS_ISSUED.ANY", level)
def Few_Uops_Executed_Threshold(self, EV, level):
    return EV("EXE_ACTIVITY.1_PORTS_UTIL", level) + self.Retiring.compute(EV) * EV("EXE_ACTIVITY.2_PORTS_UTIL:u0xc", level)
# Floating Point computational (arithmetic) Operations Count
def FLOP_Count(self, EV, level):
    # NOTE(review): the bare EV() calls below discard their results;
    # presumably they register the events with the scheduler even when the
    # FP16 branch is not taken — confirm against the EV implementation.
    EV("FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", level)
    EV("FP_ARITH_INST_RETIRED.SCALAR", level)
    EV("FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", level)
    EV("FP_ARITH_INST_RETIRED2.SCALAR_HALF", level)
    EV("FP_ARITH_INST_RETIRED.8_FLOPS", level)
    EV("FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF", level)
    EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level)
    EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level)
    EV("FP_ARITH_INST_RETIRED2.512B_PACKED_HALF", level)
    EV("FP_ARITH_INST_RETIRED.4_FLOPS", level)
    return (1 *(EV("FP_ARITH_INST_RETIRED.SCALAR", level) + EV("FP_ARITH_INST_RETIRED2.SCALAR_HALF", level)) + 2 *(EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF", level)) + 4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) + 8 *(EV("FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", level) + EV("FP_ARITH_INST_RETIRED.8_FLOPS", level)) + 16 *(EV("FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", level) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level)) + 32 * EV("FP_ARITH_INST_RETIRED2.512B_PACKED_HALF", level)) if FP16 else(1 * EV("FP_ARITH_INST_RETIRED.SCALAR", level) + 2 * EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + 4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) + 8 * EV("FP_ARITH_INST_RETIRED.8_FLOPS", level) + 16 * EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level))
# Floating Point computational (arithmetic) Operations Count
def FP_Arith_Scalar(self, EV, level):
    EV("FP_ARITH_INST_RETIRED2.SCALAR", level)
    EV("FP_ARITH_INST_RETIRED.SCALAR", level)
    return EV("FP_ARITH_INST_RETIRED.SCALAR", level) + EV("FP_ARITH_INST_RETIRED2.SCALAR", level) if FP16 else EV("FP_ARITH_INST_RETIRED.SCALAR", level)
# Floating Point computational (arithmetic) Operations Count
def FP_Arith_Vector(self, EV, level):
    EV("FP_ARITH_INST_RETIRED.VECTOR", level)
    EV("FP_ARITH_INST_RETIRED2.VECTOR", level)
    return EV("FP_ARITH_INST_RETIRED.VECTOR", level) + EV("FP_ARITH_INST_RETIRED2.VECTOR", level) if FP16 else EV("FP_ARITH_INST_RETIRED.VECTOR", level)
def HighIPC(self, EV, level):
    val = IPC(self, EV, level) / Pipeline_Width
    return val
def Light_Ops_Sum(self, EV, level):
    return self.FP_Arith.compute(EV) + self.Int_Operations.compute(EV) + self.Memory_Operations.compute(EV) + self.Fused_Instructions.compute(EV) + self.Non_Fused_Branches.compute(EV)
# LOAD_* helpers: retired-load counts scaled by the fill-buffer-hit factor.
# The `if DS else 0` guards gate events that only exist when DS is enabled;
# their bare EV() calls (results discarded) presumably just register the
# events — confirm against the EV implementation.
def LOAD_L3_HIT(self, EV, level):
    return EV("MEM_LOAD_RETIRED.L3_HIT", level) * FB_Factor(self, EV, level)
def LOAD_LCL_MEM(self, EV, level):
    return EV("MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", level) * (1 + FBHit_per_L1Miss(self, EV, level))
def LOAD_LCL_PMM(self, EV, level):
    EV("MEM_LOAD_RETIRED.LOCAL_PMM", level)
    return EV("MEM_LOAD_RETIRED.LOCAL_PMM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0
def LOAD_RMT_FWD(self, EV, level):
    EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", level)
    return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0
def LOAD_RMT_HITM(self, EV, level):
    EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", level)
    return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0
def LOAD_RMT_MEM(self, EV, level):
    EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", level)
    return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0
def LOAD_RMT_PMM(self, EV, level):
    EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", level)
    return EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", level) * (1 + FBHit_per_L1Miss(self, EV, level)) if DS else 0
def LOAD_XSNP_HIT(self, EV, level):
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD", level) + EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", level) * (1 - True_XSNP_HitM_Fraction(self, EV, level))
def LOAD_XSNP_HITM(self, EV, level):
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", level) * True_XSNP_HitM_Fraction(self, EV, level)
def LOAD_XSNP_MISS(self, EV, level):
    return EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", level)
def MEM_Bound_Ratio(self, EV, level):
    return EV("MEMORY_ACTIVITY.STALLS_L3_MISS", level) / CLKS(self, EV, level)
def Mem_DDR_Hit_Fraction(self, EV, level):
    return (19 * LOAD_RMT_MEM(self, EV, level) + 10 *(LOAD_LCL_MEM(self, EV, level) + LOAD_RMT_FWD(self, EV, level) + LOAD_RMT_HITM(self, EV, level))) / ((19 * LOAD_RMT_MEM(self, EV, level) + 10 *(LOAD_LCL_MEM(self, EV, level) + LOAD_RMT_FWD(self, EV, level) + LOAD_RMT_HITM(self, EV, level))) + (25 * LOAD_LCL_PMM(self, EV, level) + 33 * LOAD_RMT_PMM(self, EV, level))) if DS else 1
def Mem_Lock_St_Fraction(self, EV, level):
    return EV("MEM_INST_RETIRED.LOCK_LOADS", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)
def Mispred_Clears_Fraction(self, EV, level):
    return self.Branch_Mispredicts.compute(EV) / self.Bad_Speculation.compute(EV)
# ORO_* helpers clamp outstanding-request cycle counts to the thread clock.
def ORO_Demand_RFO_C1(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level )
def ORO_DRD_Any_Cycles(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level )
def ORO_DRD_BW_Cycles(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c4", level)) , level )
def Store_L2_Hit_Cycles(self, EV, level):
    return EV("MEM_STORE_RETIRED.L2_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level))
def True_XSNP_HitM_Fraction(self, EV, level):
    return EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", level) / (EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", level) + EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", level))
# Mem_*_Cost helpers: fixed latency estimates in cycles, scaled by frequency.
def Mem_XSNP_HitM_Cost(self, EV, level):
    return 81 * Core_Frequency(self, EV, level)
def Mem_XSNP_Hit_Cost(self, EV, level):
    return 79 * Core_Frequency(self, EV, level)
def Mem_XSNP_None_Cost(self, EV, level):
    return 37 * Core_Frequency(self, EV, level)
def Mem_Local_DRAM_Cost(self, EV, level):
    return 109 * Core_Frequency(self, EV, level)
def Mem_Remote_DRAM_Cost(self, EV, level):
    return 190 * Core_Frequency(self, EV, level)
def Mem_Remote_HitM_Cost(self, EV, level):
    return 170 * Core_Frequency(self, EV, level)
def Mem_Remote_Fwd_Cost(self, EV, level):
    return 170 * Core_Frequency(self, EV, level)
def Mem_L2_Hit_Cost(self, EV, level):
    return 4.4 * Core_Frequency(self, EV, level)
def PERF_METRICS_SUM(self, EV, level):
    return (EV("PERF_METRICS.FRONTEND_BOUND", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.BAD_SPECULATION", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.RETIRING", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.BACKEND_BOUND", level) / EV("TOPDOWN.SLOTS", level))
def Retire_Fraction(self, EV, level):
    return EV("UOPS_RETIRED.SLOTS", level) / EV("UOPS_ISSUED.ANY", level)
# Retired slots per Logical Processor
def Retired_Slots(self, EV, level):
    return self.Retiring.compute(EV) * SLOTS(self, EV, level)
# Number of logical processors (enabled or online) on the target system
def Num_CPUs(self, EV, level):
    return num_cores * num_sockets * num_threads if num_cores else 224 /(2 - smt_enabled )
# A system parameter for dependent-loads (pointer chasing like access pattern) of the workload. An integer fraction in range from 0 (no dependent loads) to 100 (all loads are dependent loads)
def Dependent_Loads_Weight(self, EV, level):
    return 20
# Bottleneck roll-up metrics.  Each follows the same protocol: compute the
# percentage cost into `val`, store the threshold-crossed flag in
# `self.thresh` as a side effect, and return `val`.
# Total pipeline cost of Branch Misprediction related bottlenecks
def Mispredictions(self, EV, level):
    val = 100 *(1 - Umisp(self, EV, level)) * (self.Branch_Mispredicts.compute(EV) + self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)
def Big_Code(self, EV, level):
    val = 100 * self.Fetch_Latency.compute(EV) * (self.ITLB_Misses.compute(EV) + self.ICache_Misses.compute(EV) + self.Unknown_Branches.compute(EV)) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)
def Instruction_Fetch_BW(self, EV, level):
    val = 100 *(self.Frontend_Bound.compute(EV) - (1 - Umisp(self, EV, level)) * self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) - Assist_Frontend(self, EV, level)) - Big_Code(self, EV, level)
    self.thresh = (val > 20)
    return val
# Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks
def Cache_Memory_Bandwidth(self, EV, level):
    val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.MEM_Bandwidth.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.SQ_Full.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.FB_Full.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of external Memory- or Cache-Latency related bottlenecks
def Cache_Memory_Latency(self, EV, level):
    val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.MEM_Latency.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.L3_Hit_Latency.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * self.L2_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.Store_Latency.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.L1_Hit_Latency.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)
def Memory_Data_TLBs(self, EV, level):
    val = 100 *(self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / max(self.Memory_Bound.compute(EV) , (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV)))) * (self.DTLB_Load.compute(EV) / max(self.L1_Bound.compute(EV) , (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.DTLB_Store.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)
def Memory_Synchronization(self, EV, level):
    # NOTE(review): the trailing `(1 - self.Other_Nukes.compute(EV) /
    # (self.Other_Nukes.compute(EV)))` term is 0 whenever Other_Nukes is
    # nonzero — looks like a generator artifact; confirm vs. the spreadsheet.
    val = 100 *(self.Memory_Bound.compute(EV) * ((self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * (self.Contested_Accesses.compute(EV) + self.Data_Sharing.compute(EV)) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)) + (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.PMM_Bound.compute(EV) + self.L2_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV))) * self.False_Sharing.compute(EV) / ((self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)) - self.Store_Latency.compute(EV))) + self.Machine_Clears.compute(EV) * (1 - self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))))
    self.thresh = (val > 10)
    return val
# Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy.
def Compute_Bound_Est(self, EV, level):
    val = 100 *((self.Core_Bound.compute(EV) * self.Divider.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.AMX_Busy.compute(EV) + self.Divider.compute(EV))) + (self.Core_Bound.compute(EV) * self.AMX_Busy.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.AMX_Busy.compute(EV) + self.Divider.compute(EV))) + (self.Core_Bound.compute(EV) * (self.Ports_Utilization.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.AMX_Busy.compute(EV) + self.Divider.compute(EV))) * (self.Ports_Utilized_3m.compute(EV) / (self.Ports_Utilized_0.compute(EV) + self.Ports_Utilized_1.compute(EV) + self.Ports_Utilized_2.compute(EV) + self.Ports_Utilized_3m.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments)
def Irregular_Overhead(self, EV, level):
    val = 100 *(Assist_Frontend(self, EV, level) + Umisp(self, EV, level) * self.Branch_Mispredicts.compute(EV) + (self.Machine_Clears.compute(EV) * self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))) + Serialize_Core(self, EV, level) + Assist_Retired(self, EV, level))
    self.thresh = (val > 10)
    return val
# Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.
def Other_Bottlenecks(self, EV, level):
    # defined as the remainder after all named bottleneck categories
    val = 100 -(Big_Code(self, EV, level) + Instruction_Fetch_BW(self, EV, level) + Mispredictions(self, EV, level) + Cache_Memory_Bandwidth(self, EV, level) + Cache_Memory_Latency(self, EV, level) + Memory_Data_TLBs(self, EV, level) + Memory_Synchronization(self, EV, level) + Compute_Bound_Est(self, EV, level) + Irregular_Overhead(self, EV, level) + Branching_Overhead(self, EV, level) + Useful_Work(self, EV, level))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound). Consider Loop Unrolling or function inlining optimizations
def Branching_Overhead(self, EV, level):
    val = 100 * Branching_Retired(self, EV, level)
    self.thresh = (val > 5)
    return val
# Total pipeline cost of "useful operations" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.
def Useful_Work(self, EV, level):
    val = 100 *(self.Retiring.compute(EV) - Branching_Retired(self, EV, level) - Assist_Retired(self, EV, level))
    self.thresh = (val > 20)
    return val
# Probability of Core Bound bottleneck hidden by SMT-profiling artifacts. Tip: consider analysis with SMT disabled
def Core_Bound_Likely(self, EV, level):
    val = 100 *(1 - self.Core_Bound.compute(EV) / self.Ports_Utilization.compute(EV) if self.Core_Bound.compute(EV)< self.Ports_Utilization.compute(EV) else 1) if SMT_2T_Utilization(self, EV, level)> 0.5 else 0
    self.thresh = (val > 0.5)
    return val
# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)
# Uops Per Instruction
def UopPI(self, EV, level):
val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
self.thresh = (val > 1.05)
return val
# Uops per taken branch
def UpTB(self, EV, level):
val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
self.thresh = val < Pipeline_Width * 1.5
return val
# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
return 1 / IPC(self, EV, level)
# Per-Logical Processor actual clocks when the Logical Processor is active.
def CLKS(self, EV, level):
return EV("CPU_CLK_UNHALTED.THREAD", level)
# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
return EV("TOPDOWN.SLOTS", level) if topdown_use_fixed else EV("TOPDOWN.SLOTS", level)
# Fraction of Physical Core issue-slots utilized by this Logical Processor
def Slots_Utilization(self, EV, level):
return SLOTS(self, EV, level) / (EV("TOPDOWN.SLOTS:percore", level) / 2) if smt_enabled else 1
# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level):
return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_ISSUED.ANY", level)
# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)
# Floating Point Operations Per Cycle
def FLOPc(self, EV, level):
return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level)
# Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to Fused-Multiply Add use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common.
def FP_Arith_Utilization(self, EV, level):
return (EV("FP_ARITH_DISPATCHED.PORT_0", level) + EV("FP_ARITH_DISPATCHED.PORT_1", level) + EV("FP_ARITH_DISPATCHED.PORT_5", level)) / (2 * CORE_CLKS(self, EV, level))
# Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)
def ILP(self, EV, level):
return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_EXECUTED.THREAD:c1", level)
# uops Executed per Cycle
def EPC(self, EV, level):
return EV("UOPS_EXECUTED.THREAD", level) / CLKS(self, EV, level)
# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
return EV("CPU_CLK_UNHALTED.DISTRIBUTED", level) if smt_enabled else CLKS(self, EV, level)
# Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_LOADS", level)
self.thresh = (val < 3)
return val
# Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)
self.thresh = (val < 8)
return val
# Instructions per Branch (lower number means higher occurrence rate)
def IpBranch(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
self.thresh = (val < 8)
return val
# Instructions per (near) call (lower number means higher occurrence rate)
def IpCall(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
self.thresh = (val < 200)
return val
# Instructions per taken branch
def IpTB(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
self.thresh = val < Pipeline_Width * 2 + 1
return val
# Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes.
def BpTkBranch(self, EV, level):
    """All retired branches divided by retired taken branches."""
    all_branches = EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    taken = EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    return all_branches / taken
# Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408
def IpFLOP(self, EV, level):
    """Instructions per FP operation (via FLOP_Count helper); thresh flags val < 10."""
    val = EV("INST_RETIRED.ANY", level) / FLOP_Count(self, EV, level)
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.
def IpArith(self, EV, level):
    """Instructions per FP arithmetic instruction (scalar + vector helpers); thresh flags val < 10."""
    val = EV("INST_RETIRED.ANY", level) / (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level))
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_HP(self, EV, level):
    """Instructions per scalar FP16 arithmetic instruction; 0 when FP16 is unsupported."""
    # NOTE(review): the two bare EV() calls below appear to exist so the events
    # are requested regardless of the FP16 branch (generated-code pattern) — confirm.
    EV("FP_ARITH_INST_RETIRED2.SCALAR", level)
    EV("INST_RETIRED.ANY", level)
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED2.SCALAR", level) if FP16 else 0
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_SP(self, EV, level):
    """Instructions per scalar single-precision FP arithmetic instruction; thresh flags val < 10."""
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level)
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_DP(self, EV, level):
    """Instructions per scalar double-precision FP arithmetic instruction; thresh flags val < 10."""
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level)
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX128(self, EV, level):
    """Instructions per 128-bit packed FP arithmetic instruction (FP16 term only when FP16)."""
    # NOTE(review): bare EV() calls appear to request the events unconditionally
    # even when the FP16 branch is not taken (generated-code pattern) — confirm.
    EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level)
    EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level)
    EV("INST_RETIRED.ANY", level)
    EV("FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", level)
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level) + EV("FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", level)) if FP16 else EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX256(self, EV, level):
    """Instructions per 256-bit packed FP arithmetic instruction (FP16 term only when FP16)."""
    EV("FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", level)
    EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level)
    EV("INST_RETIRED.ANY", level)
    EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level) + EV("FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", level)) if FP16 else EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX512(self, EV, level):
    """Instructions per 512-bit packed FP arithmetic instruction (FP16 term only when FP16)."""
    EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level)
    EV("FP_ARITH_INST_RETIRED2.512B_PACKED_HALF", level)
    EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", level)
    EV("INST_RETIRED.ANY", level)
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level) + EV("FP_ARITH_INST_RETIRED2.512B_PACKED_HALF", level)) if FP16 else EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val
# Instructions per PAUSE (lower number means higher occurrence rate)
def IpPause(self, EV, level):
    """Instructions retired per PAUSE instruction."""
    return Instructions(self, EV, level) / EV("CPU_CLK_UNHALTED.PAUSE_INST", level)
# Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)
def IpSWPF(self, EV, level):
    """Instructions retired per software prefetch; thresh flags val < 100."""
    val = EV("INST_RETIRED.ANY", level) / EV("SW_PREFETCH_ACCESS.T0:u0xF", level)
    self.thresh = (val < 100)
    return val
# Total number of retired Instructions
def Instructions(self, EV, level):
    """Total retired instruction count."""
    retired = EV("INST_RETIRED.ANY", level)
    return retired
# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
    """Retired uops (Retired_Slots helper) per retirement-active cycle."""
    return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.SLOTS:c1", level)
# Estimated fraction of retirement-cycles dealing with repeat instructions
def Strings_Cycles(self, EV, level):
    """REP-iteration share of retirement-active cycles; thresh flags val > 0.1."""
    val = EV("INST_RETIRED.REP_ITERATION", level) / EV("UOPS_RETIRED.SLOTS:c1", level)
    self.thresh = (val > 0.1)
    return val
# Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)
def IpAssist(self, EV, level):
    """Instructions retired per microcode assist; thresh flags val < 100000."""
    val = EV("INST_RETIRED.ANY", level) / EV("ASSISTS.ANY", level)
    self.thresh = (val < 100000)
    return val
# Average uops executed per execution-active cycle (via Execute_Cycles helper)
def Execute(self, EV, level):
    """Uops executed per cycle with any execution (Execute_Cycles helper)."""
    return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level)
# Average number of uops fetched from LSD per cycle
def Fetch_LSD(self, EV, level):
    """Uops delivered by the Loop Stream Detector per LSD-active cycle."""
    return EV("LSD.UOPS", level) / EV("LSD.CYCLES_ACTIVE", level)
# Average number of uops fetched from DSB per cycle
def Fetch_DSB(self, EV, level):
    """Uops delivered by the DSB (uop cache) per DSB-active cycle."""
    return EV("IDQ.DSB_UOPS", level) / EV("IDQ.DSB_CYCLES_ANY", level)
# Average number of uops fetched from MITE per cycle
def Fetch_MITE(self, EV, level):
    """Uops delivered by legacy decode (MITE) per MITE-active cycle."""
    return EV("IDQ.MITE_UOPS", level) / EV("IDQ.MITE_CYCLES_ANY", level)
# Average number of Uops issued by front-end when it issued something
def Fetch_UpC(self, EV, level):
    """Uops issued per issue-active cycle."""
    return EV("UOPS_ISSUED.ANY", level) / EV("UOPS_ISSUED.ANY:c1", level)
# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    """DSB share of all fetched uops; thresh flags low coverage (< 0.7) only under high IPC."""
    val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val
# Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.
def Unknown_Branch_Cost(self, EV, level):
    """Average cycles per unknown-branch detection episode (total / edge-counted episodes)."""
    return EV("INT_MISC.UNKNOWN_BRANCH_CYCLES", level) / EV("INT_MISC.UNKNOWN_BRANCH_CYCLES:c1:e1", level)
# Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.
def DSB_Switch_Cost(self, EV, level):
    """Average penalty cycles per DSB-to-MITE switch episode."""
    return EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", level) / EV("DSB2MITE_SWITCHES.PENALTY_CYCLES:c1:e1", level)
# Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Misses(self, EV, level):
    """Percent pipeline cost of DSB misses, combining sibling TMA nodes' compute() results; thresh flags val > 10."""
    val = 100 *(self.Fetch_Latency.compute(EV) * self.DSB_Switches.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) + self.Fetch_Bandwidth.compute(EV) * self.MITE.compute(EV) / (self.MITE.compute(EV) + self.DSB.compute(EV)))
    self.thresh = (val > 10)
    return val
# Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Bandwidth(self, EV, level):
    """Percent pipeline cost of DSB-hit bandwidth, from sibling TMA nodes; thresh flags val > 10."""
    val = 100 *(self.Frontend_Bound.compute(EV) * (self.Fetch_Bandwidth.compute(EV) / (self.Fetch_Bandwidth.compute(EV) + self.Fetch_Latency.compute(EV))) * (self.DSB.compute(EV) / (self.MITE.compute(EV) + self.DSB.compute(EV))))
    self.thresh = (val > 10)
    return val
# Average Latency for L1 instruction cache misses
def ICache_Miss_Latency(self, EV, level):
    """Average stall cycles per i-cache data-stall episode."""
    return EV("ICACHE_DATA.STALLS", level) / EV("ICACHE_DATA.STALLS:c1:e1", level)
# Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.
def IC_Misses(self, EV, level):
    """Percent pipeline cost of i-cache misses, from sibling TMA nodes; thresh flags val > 5."""
    val = 100 *(self.Fetch_Latency.compute(EV) * self.ICache_Misses.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
    self.thresh = (val > 5)
    return val
# Instructions per non-speculative DSB miss (lower number means higher occurrence rate)
def IpDSB_Miss_Ret(self, EV, level):
    """Instructions retired per retired DSB miss; thresh flags val < 50."""
    val = EV("INST_RETIRED.ANY", level) / EV("FRONTEND_RETIRED.ANY_DSB_MISS", level)
    self.thresh = (val < 50)
    return val
# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
    """Instructions retired per BAClear (speculative unknown branch)."""
    return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)
# L2 cache true code cacheline misses per kilo instruction
def L2MPKI_Code(self, EV, level):
    """Retired code L2 misses per kilo instruction."""
    return 1000 * EV("FRONTEND_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache speculative code cacheline misses per kilo instruction
def L2MPKI_Code_All(self, EV, level):
    """Speculative code L2 misses per kilo instruction."""
    return 1000 * EV("L2_RQSTS.CODE_RD_MISS", level) / EV("INST_RETIRED.ANY", level)
# Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    """Instructions retired per retired branch misprediction; self.thresh marks ratio < 200."""
    instructions = EV("INST_RETIRED.ANY", level)
    mispredicts = EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    ratio = instructions / mispredicts
    self.thresh = ratio < 200
    return ratio
# Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).
def IpMisp_Cond_Ntaken(self, EV, level):
    """Instructions per mispredicted non-taken conditional; thresh flags val < 200."""
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_NTAKEN", level)
    self.thresh = (val < 200)
    return val
# Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).
def IpMisp_Cond_Taken(self, EV, level):
    """Instructions per mispredicted taken conditional; thresh flags val < 200."""
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_TAKEN", level)
    self.thresh = (val < 200)
    return val
# Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).
def IpMisp_Ret(self, EV, level):
    """Instructions per mispredicted return; thresh flags val < 500."""
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.RET", level)
    self.thresh = (val < 500)
    return val
# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    """Instructions per mispredicted indirect call/jump; thresh flags val < 1000."""
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.INDIRECT", level)
    self.thresh = (val < 1000)
    return val
# Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)
def Branch_Misprediction_Cost(self, EV, level):
    """TMA slots wasted per retired branch misprediction (uses Mispredictions/SLOTS helpers)."""
    return Mispredictions(self, EV, level) * SLOTS(self, EV, level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / 100
# Speculative to Retired ratio of all clears (covering Mispredicts and nukes)
def Spec_Clears_Ratio(self, EV, level):
    """Speculative clear count over retired mispredicts plus machine clears."""
    return EV("INT_MISC.CLEARS_COUNT", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))
# Fraction of branches that are non-taken conditionals
def Cond_NT(self, EV, level):
    """Non-taken conditional share of all retired branches."""
    return EV("BR_INST_RETIRED.COND_NTAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches that are taken conditionals
def Cond_TK(self, EV, level):
    """Taken conditional share of all retired branches."""
    return EV("BR_INST_RETIRED.COND_TAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches that are CALL or RET
def CallRet(self, EV, level):
    """Fraction of retired branches that are near calls or near returns."""
    calls = EV("BR_INST_RETIRED.NEAR_CALL", level)
    returns = EV("BR_INST_RETIRED.NEAR_RETURN", level)
    branches = EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    return (calls + returns) / branches
# Fraction of branches that are unconditional (direct or indirect) jumps
def Jump(self, EV, level):
    """Unconditional-jump share of all retired branches (via Br_DoI_Jumps helper)."""
    return Br_DoI_Jumps(self, EV, level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)
def Other_Branches(self, EV, level):
    """Remainder after conditional, call/ret and jump fractions."""
    return 1 -(Cond_NT(self, EV, level) + Cond_TK(self, EV, level) + CallRet(self, EV, level) + Jump(self, EV, level))
# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    """Average core cycles an L1D-miss demand load is pending."""
    return EV("L1D_PEND_MISS.PENDING", level) / EV("MEM_LOAD_COMPLETED.L1_MISS_ANY", level)
# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    """Average outstanding L1D misses per miss-pending cycle."""
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)
# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    """Retired-load L1 misses per kilo instruction."""
    return 1000 * EV("MEM_LOAD_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)
# L1 cache true misses per kilo instruction for all demand loads (including speculative)
def L1MPKI_Load(self, EV, level):
    """All demand-load L1 misses (incl. speculative) per kilo instruction."""
    return 1000 * EV("L2_RQSTS.ALL_DEMAND_DATA_RD", level) / EV("INST_RETIRED.ANY", level)
# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    """Retired-load L2 misses per kilo instruction."""
    return 1000 * EV("MEM_LOAD_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache true misses per kilo instruction for all request types (including speculative)
def L2MPKI_All(self, EV, level):
    """All-request L2 misses (incl. speculative) per kilo instruction."""
    return 1000 * EV("L2_RQSTS.MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache true misses per kilo instruction for all demand loads (including speculative)
def L2MPKI_Load(self, EV, level):
    """Demand-load L2 misses (incl. speculative) per kilo instruction."""
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level) / EV("INST_RETIRED.ANY", level)
# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    """Demand-RFO L2 misses per kilo instruction."""
    return 1000 * EV("L2_RQSTS.RFO_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache hits per kilo instruction for all request types (including speculative)
def L2HPKI_All(self, EV, level):
    """All-request L2 hits (references minus misses) per kilo instruction."""
    return 1000 *(EV("L2_RQSTS.REFERENCES", level) - EV("L2_RQSTS.MISS", level)) / EV("INST_RETIRED.ANY", level)
# L2 cache hits per kilo instruction for all demand loads (including speculative)
def L2HPKI_Load(self, EV, level):
    """Demand-load L2 hits per kilo instruction."""
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level) / EV("INST_RETIRED.ANY", level)
# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    """Retired-load L3 misses per kilo instruction."""
    return 1000 * EV("MEM_LOAD_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)
# Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)
def FB_HPKI(self, EV, level):
    """Fill-buffer hits per kilo instruction for retired loads."""
    return 1000 * EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("INST_RETIRED.ANY", level)
# L1 data-cache fill bandwidth [GB / sec]: 64B lines replaced over wall-clock Time
def L1D_Cache_Fill_BW(self, EV, level):
    """L1D fill bandwidth in GB/s."""
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)
# L2 cache fill bandwidth [GB / sec]
def L2_Cache_Fill_BW(self, EV, level):
    """L2 fill bandwidth in GB/s."""
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)
# L3 cache fill bandwidth [GB / sec]
def L3_Cache_Fill_BW(self, EV, level):
    """L3 fill bandwidth in GB/s."""
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)
# L3 cache access bandwidth [GB / sec]
def L3_Cache_Access_BW(self, EV, level):
    """L3 access bandwidth in GB/s (all offcore requests)."""
    return 64 * EV("OFFCORE_REQUESTS.ALL_REQUESTS", level) / OneBillion / Time(self, EV, level)
# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    """Page-walker occupancy across code/load/store walks; thresh flags val > 0.5."""
    val = (EV("ITLB_MISSES.WALK_PENDING", level) + EV("DTLB_LOAD_MISSES.WALK_PENDING", level) + EV("DTLB_STORE_MISSES.WALK_PENDING", level)) / (4 * CORE_CLKS(self, EV, level))
    self.thresh = (val > 0.5)
    return val
# STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Code_STLB_MPKI(self, EV, level):
    """Completed code page walks per kilo instruction."""
    return 1000 * EV("ITLB_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)
# STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Load_STLB_MPKI(self, EV, level):
    """Completed load page walks per kilo instruction."""
    return 1000 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)
# STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Store_STLB_MPKI(self, EV, level):
    """Completed store page walks per kilo instruction."""
    return 1000 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)
# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    """Per-core wrapper over L1D_Cache_Fill_BW."""
    return L1D_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    """Per-core wrapper over L2_Cache_Fill_BW."""
    return L2_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    """Per-core wrapper over L3_Cache_Fill_BW."""
    return L3_Cache_Fill_BW(self, EV, level)
# Average per-core data access bandwidth to the L3 cache [GB / sec]
def L3_Cache_Access_BW_2T(self, EV, level):
    """Per-core wrapper over L3_Cache_Access_BW."""
    return L3_Cache_Access_BW(self, EV, level)
# Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)
def L2_Evictions_Silent_PKI(self, EV, level):
    """Silent L2 evictions per kilo instruction."""
    return 1000 * EV("L2_LINES_OUT.SILENT", level) / Instructions(self, EV, level)
# Rate of non silent evictions from the L2 cache per Kilo instruction
def L2_Evictions_NonSilent_PKI(self, EV, level):
    """Non-silent L2 evictions per kilo instruction."""
    return 1000 * EV("L2_LINES_OUT.NON_SILENT", level) / Instructions(self, EV, level)
# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    """Average outstanding cycles per demand data-read request."""
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)
# Average Latency for L3 cache miss demand Loads
def Load_L3_Miss_Latency(self, EV, level):
    """Average outstanding cycles per L3-miss demand data-read request."""
    return EV("OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", level)
# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    """Average parallel demand data-read requests while any is outstanding."""
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD:c1", level)
# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    """Average parallel data reads while any is outstanding."""
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)
# Off-core accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)
def Offcore_Read_Any_PKI(self, EV, level):
    """Reads-to-core responses per kilo instruction."""
    return 1000 * EV("OCR.READS_TO_CORE.ANY_RESPONSE", level) / Instructions(self, EV, level)
# L3 cache misses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)
def Offcore_Read_L3M_PKI(self, EV, level):
    """L3-missing reads-to-core per kilo instruction."""
    return 1000 * EV("OCR.READS_TO_CORE.L3_MISS", level) / Instructions(self, EV, level)
# Off-core accesses per kilo instruction for modified write requests
def Offcore_MWrite_Any_PKI(self, EV, level):
    """Modified-write responses per kilo instruction."""
    return 1000 * EV("OCR.MODIFIED_WRITE.ANY_RESPONSE", level) / Instructions(self, EV, level)
# Un-cacheable retired load per kilo instruction
def UC_Load_PKI(self, EV, level):
    """Uncacheable retired loads per kilo instruction."""
    return 1000 * EV("MEM_LOAD_MISC_RETIRED.UC", level) / EV("INST_RETIRED.ANY", level)
# "Bus lock" per kilo instruction
def Bus_Lock_PKI(self, EV, level):
    """Bus locks per kilo instruction."""
    return 1000 * EV("SQ_MISC.BUS_LOCK", level) / EV("INST_RETIRED.ANY", level)
# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    """Utilized CPUs divided by total CPUs (helper functions)."""
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)
# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    """Reference clocks over TSC clocks gives count of busy CPUs."""
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)
# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    """Unhalted-core frequency in GHz (turbo ratio times TSC rate)."""
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)
# Measured Average Uncore Frequency for the SoC [GHz]
def Uncore_Frequency(self, EV, level):
    """Uncore (CHA) clock frequency in GHz."""
    return Socket_CLKS(self, EV, level) / 1e9 / Time(self, EV, level)
# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    """GFLOP/s over the measurement interval (FLOP_Count helper)."""
    return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)
# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    """Actual over reference clock ratio (above 1 means turbo)."""
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)
# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    """Both-threads-active cycle fraction; 0 when SMT is disabled."""
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_DISTRIBUTED", level) if smt_enabled else 0
# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    """Supervisor-mode cycle fraction; thresh flags val > 0.05."""
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val
# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    """CPI restricted to supervisor-mode cycles and instructions."""
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)
# Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states. Sample code of TPAUSE: https://github.com/torvalds/linux/blob/master/arch/x86/lib/delay.c#L105. If running on Linux, please check the power control interface: https://github.com/torvalds/linux/blob/master/arch/x86/kernel/cpu/umwait.c and https://github.com/torvalds/linux/blob/master/Documentation/ABI/testing/sysfs-devices-system-cpu#L587
def C0_Wait(self, EV, level):
    """Unhalted-but-waiting cycle fraction; thresh flags val > 0.05."""
    val = EV("CPU_CLK_UNHALTED.C0_WAIT", level) / CLKS(self, EV, level)
    self.thresh = (val > 0.05)
    return val
# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    """DRAM read+write bandwidth in GB/s (64B CAS transfers)."""
    return (64 *(EV("UNC_M_CAS_COUNT.RD", level) + EV("UNC_M_CAS_COUNT.WR", level)) / OneBillion) / Time(self, EV, level)
# Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches.
def R2C_Offcore_BW(self, EV, level):
    """Reads-to-core offcore bandwidth in GB/s."""
    return 64 * EV("OCR.READS_TO_CORE.ANY_RESPONSE", level) / 1e9 / Time(self, EV, level)
# Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. See R2C_Offcore_BW.
def R2C_L3M_BW(self, EV, level):
    """Reads-to-core L3-miss bandwidth in GB/s."""
    return 64 * EV("OCR.READS_TO_CORE.L3_MISS", level) / 1e9 / Time(self, EV, level)
# Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW.
def R2C_DRAM_BW(self, EV, level):
    """Reads-to-core DRAM bandwidth in GB/s."""
    return 64 * EV("OCR.READS_TO_CORE.DRAM", level) / 1e9 / Time(self, EV, level)
# Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. memory-controller only
def MEM_Read_Latency(self, EV, level):
    """External-memory read latency in nanoseconds (CHA TOR occupancy over inserts)."""
    return OneBillion *(EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD", level) / EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD", level)) / (Socket_CLKS(self, EV, level) / Time(self, EV, level))
# Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches
def MEM_Parallel_Reads(self, EV, level):
    """Average parallel external-memory data reads while any is outstanding."""
    return EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD", level) / EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD:c1", level)
# Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches
def MEM_PMM_Read_Latency(self, EV, level):
    """PMM read latency in nanoseconds; 0 unless PMM App-Direct is in use."""
    # NOTE(review): the bare EV() calls appear to request the events regardless of
    # the PMM_App_Direct branch (generated-code pattern) — confirm.
    EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM", level)
    EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM", level)
    return (OneBillion *(EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM", level) / EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM", level)) / EV("UNC_CHA_CLOCKTICKS:one_unit", level)) if PMM_App_Direct else 0
# Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches
def MEM_DRAM_Read_Latency(self, EV, level):
    """DRAM read latency in nanoseconds."""
    return OneBillion *(EV("UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR", level) / EV("UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR", level)) / EV("UNC_CHA_CLOCKTICKS:one_unit", level)
# Average 3DXP Memory Bandwidth Use for reads [GB / sec]
def PMM_Read_BW(self, EV, level):
    """PMM read bandwidth in GB/s; 0 unless PMM App-Direct is in use."""
    return ((64 * EV("UNC_M_PMM_RPQ_INSERTS", level) / OneBillion) / Time(self, EV, level)) if PMM_App_Direct else 0
# Average 3DXP Memory Bandwidth Use for Writes [GB / sec]
def PMM_Write_BW(self, EV, level):
    """PMM write bandwidth in GB/s; 0 unless PMM App-Direct is in use."""
    return ((64 * EV("UNC_M_PMM_WPQ_INSERTS", level) / OneBillion) / Time(self, EV, level)) if PMM_App_Direct else 0
# Average IO (network or disk) Bandwidth Use for Reads [GB / sec]. Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU
def IO_Read_BW(self, EV, level):
    """Device-initiated read bandwidth in GB/s."""
    return EV("UNC_CHA_TOR_INSERTS.IO_PCIRDCUR", level) * 64 / OneBillion / Time(self, EV, level)
# Average IO (network or disk) Bandwidth Use for Writes [GB / sec]. Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU
def IO_Write_BW(self, EV, level):
    """Device-initiated write bandwidth in GB/s."""
    return (EV("UNC_CHA_TOR_INSERTS.IO_ITOM", level) + EV("UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR", level)) * 64 / OneBillion / Time(self, EV, level)
# Cross-socket Ultra Path Interconnect (UPI) data transmit bandwidth for data only [MB / sec]
def UPI_Data_Transmit_BW(self, EV, level):
    """UPI data-flit transmit bandwidth in MB/s (64 bits per 9-flit group)."""
    return EV("UNC_UPI_TxL_FLITS.ALL_DATA", level) * 64 / 9 / 1000000
# Run duration time in seconds
def Time(self, EV, level):
    """Measurement interval in seconds; thresh flags intervals shorter than 1s."""
    val = EV("interval-s", 0)
    self.thresh = (val < 1)
    return val
# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    """Uncore (CHA) clockticks for one unit."""
    return EV("UNC_CHA_CLOCKTICKS:one_unit", level)
# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    """User-mode instructions per far branch; thresh flags val < 1000000."""
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = (val < 1000000)
    return val
# Event groups
class Frontend_Bound:
    """TMA level-1 node: slots where the Frontend undersupplies the Backend."""
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_4:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    def compute(self, EV):
        # PERF_METRICS fixed-counter path when topdown_use_fixed is set, else the
        # IDQ_BUBBLES estimate; both subtract uop-dropping slots.
        try:
            self.val = (EV("PERF_METRICS.FRONTEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) - EV("INT_MISC.UOP_DROPPING", 1) / SLOTS(self, EV, 1) if topdown_use_fixed else(EV("IDQ_BUBBLES.CORE", 1) - EV("INT_MISC.UOP_DROPPING", 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the
processor's Frontend undersupplies its Backend. Frontend
denotes the first part of the processor core responsible to
fetch operations that are executed later on by the Backend
part. Within the Frontend; a branch predictor predicts the
next address to fetch; cache-lines are fetched from the
memory subsystem; parsed into instructions; and lastly
decoded into micro-operations (uops). Ideally the Frontend
can issue Pipeline_Width uops every cycle to the Backend.
Frontend Bound denotes unutilized issue-slots when there is
no Backend stall; i.e. bubbles where Frontend delivered no
uops while Backend could have accepted them. For example;
stalls due to instruction-cache misses would be categorized
under Frontend Bound."""
class Fetch_Latency:
    """TMA level-2 node: slots stalled on Frontend latency (child of Frontend_Bound)."""
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_16:pp', 'FRONTEND_RETIRED.LATENCY_GE_8:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # PERF_METRICS fixed-counter path when topdown_use_fixed is set, else the
        # zero-uops-delivered IDQ-bubble estimate; both subtract uop-dropping slots.
        try:
            self.val = ((EV("PERF_METRICS.FETCH_LATENCY", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) - EV("INT_MISC.UOP_DROPPING", 2) / SLOTS(self, EV, 2)) if topdown_use_fixed else(EV("IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE", 2) * Pipeline_Width - EV("INT_MISC.UOP_DROPPING", 2)) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend latency issues. For example; instruction-
cache misses; iTLB misses or fetch stalls after a branch
misprediction are categorized under Frontend Latency. In
such cases; the Frontend eventually delivers no uops for
some period."""
class ICache_Misses:
    """TMA level-3 node: cycles stalled on L1 instruction-cache data misses."""
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.L2_MISS:pp', 'FRONTEND_RETIRED.L1I_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None
    def compute(self, EV):
        # i-cache data stall cycles as a fraction of thread clocks.
        try:
            self.val = EV("ICACHE_DATA.STALLS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to instruction cache misses.. Using compiler's
Profile-Guided Optimization (PGO) can reduce i-cache misses
through improved hot code layout."""
class ITLB_Misses:
    """TMA level-3 node: cycles stalled on instruction TLB (i-cache tag) misses."""
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.STLB_MISS:pp', 'FRONTEND_RETIRED.ITLB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    def compute(self, EV):
        # i-cache tag stall cycles as a fraction of thread clocks.
        try:
            self.val = EV("ICACHE_TAG.STALLS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Instruction TLB (ITLB) misses.. Consider
large 2M pages for code (selectively prefer hot large-size
function, due to limited 2M entries). Linux options:
standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public
ations/optimizing-function-placement-for-large-scale-data-
center-applications-2/"""
class Branch_Resteers:
    """TMA level-3 node: cycles stalled refetching the corrected path after clears."""
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        # Clear-resteer cycles plus the sibling Unknown_Branches node's cost.
        try:
            self.val = EV("INT_MISC.CLEAR_RESTEER_CYCLES", 3) / CLKS(self, EV, 3) + self.Unknown_Branches.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers. Branch Resteers estimates
the Frontend delay in fetching operations from corrected
path; following all sorts of miss-predicted branches. For
example; branchy code with lots of miss-predictions might
get categorized under Branch Resteers. Note the value of
this node may overlap with its siblings."""
class Mispredicts_Resteers:
    """TMA level-4 node: resteer cycles attributed to branch mispredictions."""
    name = "Mispredicts_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP'])
    maxval = None
    def compute(self, EV):
        # Resteer cycles scaled by the mispredict share of clears
        # (Mispred_Clears_Fraction helper).
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 4) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Mispredicts_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Branch
Misprediction at execution stage."""
class Clears_Resteers:
    """TMA level-4 node: resteer cycles attributed to machine clears."""
    name = "Clears_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'MachineClears'])
    maxval = None
    def compute(self, EV):
        # Complement of the mispredict share of clears, applied to resteer cycles.
        try:
            self.val = (1 - Mispred_Clears_Fraction(self, EV, 4)) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Clears_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Machine
Clears."""
class Unknown_Branches:
    """TMA tree node 'Unknown_Branches' (Clocks domain, area FE, level 4); human-readable description in `desc`."""
    name = "Unknown_Branches"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['FRONTEND_RETIRED.UNKNOWN_BRANCH']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (unknown-branch clear cycles per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = EV("INT_MISC.UNKNOWN_BRANCH_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Unknown_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to new branch address clears. These are fetched
branches the Branch Prediction Unit was unable to recognize
(e.g. first time the branch is fetched or hitting BTB
capacity limit) hence called Unknown Branches"""
class MS_Switches:
    """TMA tree node 'MS_Switches' (Clocks_Estimated domain, area FE, level 3); human-readable description in `desc`."""
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.MS_FLOWS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (estimated microcode-sequencer switch cost per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = MS_Switches_Cost * EV("UOPS_RETIRED.MS:c1:e1", 3) / Retire_Fraction(self, EV, 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU
was stalled due to switches of uop delivery to the Microcode
Sequencer (MS). Commonly used instructions are optimized for
delivery by the DSB (decoded i-cache) or MITE (legacy
instruction decode) pipelines. Certain operations cannot be
handled natively by the execution pipeline; and must be
performed by microcode (small programs injected into the
execution stream). Switching to the MS too often can
negatively impact performance. The MS is designated to
deliver long uop flows required by CISC instructions like
CPUID; or uncommon conditions like Floating Point Assists
when dealing with Denormals."""
class LCP:
    """TMA tree node 'LCP' (Clocks domain, area FE, level 3); human-readable description in `desc`."""
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (length-changing-prefix decode stall cycles per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = EV("DECODE.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled
due to Length Changing Prefixes (LCPs). Using proper
compiler flags or Intel Compiler by default will certainly
avoid this."""
class DSB_Switches:
    """TMA tree node 'DSB_Switches' (Clocks domain, area FE, level 3); human-readable description in `desc`."""
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.DSB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (DSB-to-MITE switch penalty cycles per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to switches from DSB to MITE pipelines. The DSB
(decoded i-cache) is a Uop Cache where the front-end
directly delivers Uops (micro operations) avoiding heavy x86
decoding. The DSB pipeline has shorter latency and delivered
higher bandwidth than the MITE (legacy instruction decode
pipeline). Switching between the two pipelines can cause
penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class Fetch_Bandwidth:
    """TMA tree node 'Fetch_Bandwidth' (Slots domain, area FE, level 2); human-readable description in `desc`."""
    # Node identity / placement in the TMA tree.
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    # Suggested perf sampling events for drilling into this node.
    sample = ['FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_2:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        """Derive self.val as Frontend_Bound minus Fetch_Latency, floored at zero; returns self.val."""
        try:
            frontend_total = self.Frontend_Bound.compute(EV)
            latency_part = self.Fetch_Latency.compute(EV)
            self.val = max(0, frontend_total - latency_part)
            self.thresh = self.val > 0.2
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend bandwidth issues. For example;
inefficiencies at the instruction decoders; or restrictions
for caching in the DSB (decoded uops cache) are categorized
under Fetch Bandwidth. In such cases; the Frontend typically
delivers suboptimal amount of uops to the Backend."""
class MITE:
    """TMA tree node 'MITE' (Slots_Estimated domain, area FE, level 3); human-readable description in `desc`."""
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.ANY_DSB_MISS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (MITE under-delivery cycles per core clock, halved) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (EV("IDQ.MITE_CYCLES_ANY", 3) - EV("IDQ.MITE_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to the MITE pipeline (the legacy
decode pipeline). This pipeline is used for code that was
not pre-cached in the DSB or LSD. For example;
inefficiencies due to asymmetric decoders; use of long
immediate or LCP can manifest as MITE fetch bandwidth
bottleneck.. Consider tuning codegen of 'small hotspots'
that can fit in DSB. Read about 'Decoded ICache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class Decoder0_Alone:
    """TMA tree node 'Decoder0_Alone' (Slots_Estimated domain, area FE, level 4); human-readable description in `desc`."""
    name = "Decoder0_Alone"
    domain = "Slots_Estimated"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (cycles only decoder-0 decoded, per core clock, halved) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (EV("INST_DECODED.DECODERS:c1", 4) - EV("INST_DECODED.DECODERS:c2", 4)) / CORE_CLKS(self, EV, 4) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Decoder0_Alone zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where decoder-0
was the only active decoder"""
class DSB:
    """TMA tree node 'DSB' (Slots_Estimated domain, area FE, level 3); human-readable description in `desc`."""
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (DSB under-delivery cycles per core clock, halved) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (EV("IDQ.DSB_CYCLES_ANY", 3) - EV("IDQ.DSB_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to DSB (decoded uop cache) fetch
pipeline. For example; inefficient utilization of the DSB
cache structure or bank conflict when reading from it; are
categorized here."""
class Bad_Speculation:
    """TMA tree node 'Bad_Speculation' (Slots domain, area BAD, level 1); human-readable description in `desc`."""
    # Node identity / placement in the TMA tree.
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    def compute(self, EV):
        """Derive self.val as the slot fraction left over after the other three level-1 nodes, floored at zero; returns self.val."""
        try:
            # Evaluate the sibling level-1 nodes in the same order as before.
            accounted = self.Frontend_Bound.compute(EV) + self.Backend_Bound.compute(EV) + self.Retiring.compute(EV)
            self.val = max(1 - accounted, 0)
            self.thresh = self.val > 0.15
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""
class Branch_Mispredicts:
    """TMA tree node 'Branch_Mispredicts' (Slots domain, area BAD, level 2); human-readable description in `desc`."""
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['TOPDOWN.BR_MISPREDICT_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (mispredict slot fraction; fixed PERF_METRICS path when topdown_use_fixed) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (EV("PERF_METRICS.BRANCH_MISPREDICTS", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("TOPDOWN.BR_MISPREDICT_SLOTS", 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction. These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Other_Mispredicts:
    """TMA tree node 'Other_Mispredicts' (Slots domain, area BAD, level 3); human-readable description in `desc`."""
    name = "Other_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'BrMispredicts'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (non-retired-branch mispredict slice of Branch_Mispredicts, floored at 0.0001) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = max(self.Branch_Mispredicts.compute(EV) * (1 - EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) / (EV("INT_MISC.CLEARS_COUNT", 3) - EV("MACHINE_CLEARS.COUNT", 3))) , 0.0001 )
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Mispredicts zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU was stalled
due to other cases of misprediction (non-retired x86
branches or other types)."""
class Machine_Clears:
    """TMA tree node 'Machine_Clears' (Slots domain, area BAD, level 2); human-readable description in `desc`."""
    # Node identity / placement in the TMA tree.
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    # Suggested perf sampling event for drilling into this node.
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        """Derive self.val as Bad_Speculation minus Branch_Mispredicts, floored at zero; returns self.val."""
        try:
            bad_spec = self.Bad_Speculation.compute(EV)
            mispredicts = self.Branch_Mispredicts.compute(EV)
            self.val = max(0, bad_spec - mispredicts)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears. These slots are either wasted by
uops fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:. https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""
class Other_Nukes:
    """TMA tree node 'Other_Nukes' (Slots domain, area BAD, level 3); human-readable description in `desc`."""
    name = "Other_Nukes"
    domain = "Slots"
    area = "BAD"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    # NOTE(review): group name 'Machine_Clears' differs from the 'MachineClears'
    # spelling used by sibling nodes -- possibly a generator inconsistency; confirm upstream.
    metricgroup = frozenset(['BvIO', 'Machine_Clears'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (non-memory-ordering slice of Machine_Clears, floored at 0.0001) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = max(self.Machine_Clears.compute(EV) * (1 - EV("MACHINE_CLEARS.MEMORY_ORDERING", 3) / EV("MACHINE_CLEARS.COUNT", 3)) , 0.0001 )
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Nukes zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Nukes (Machine Clears) not related to memory
ordering."""
class Backend_Bound:
    """TMA tree node 'Backend_Bound' (Slots domain, area BE, level 1); human-readable description in `desc`."""
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = ['TOPDOWN.BACKEND_BOUND_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (backend-bound slot fraction; fixed PERF_METRICS path when topdown_use_fixed) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (EV("PERF_METRICS.BACKEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) if topdown_use_fixed else EV("TOPDOWN.BACKEND_BOUND_SLOTS", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. Backend is the portion of
the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""
class Memory_Bound:
    """TMA tree node 'Memory_Bound' (Slots domain, area BE/Mem, level 2); human-readable description in `desc`."""
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (memory-bound slot fraction; fixed PERF_METRICS path when topdown_use_fixed) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (EV("PERF_METRICS.MEMORY_BOUND", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("TOPDOWN.MEMORY_BOUND_SLOTS", 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck. Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""
class L1_Bound:
    """TMA tree node 'L1_Bound' (Stalls domain, area BE/Mem, level 3); human-readable description in `desc`."""
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT:pp', 'MEM_LOAD_RETIRED.FB_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (load-bound cycles not missing L1D, per clock, floored at zero) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = max((EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3) - EV("MEMORY_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3) , 0 )
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled without
loads missing the L1 data cache. The L1 data cache
typically has the shortest latency. However; in certain
cases like loads blocked on older stores; a load might
suffer due to high latency even though it is being satisfied
by the L1. Another example is loads who miss in the TLB.
These cases are characterized by execution unit stalls;
while some non-completed demand load lives in the machine
without having that demand load missing the L1 cache."""
class DTLB_Load:
    """TMA tree node 'DTLB_Load' (Clocks_Estimated domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (estimated DTLB-load miss cycles per clock, capped by non-L1D-miss memory stall cycles) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = min(Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT:c1", 4) + EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 4) , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("MEMORY_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages). This includes hitting in the second-level TLB (STLB)
as well as performing a hardware page walk on an STLB miss.."""
class Load_STLB_Hit:
    """TMA tree node 'Load_STLB_Hit' (Clocks_Estimated domain, area BE/Mem, level 5); human-readable description in `desc`."""
    # Node identity / placement in the TMA tree.
    name = "Load_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        """Derive self.val as the DTLB_Load estimate minus its Load_STLB_Miss portion; returns self.val."""
        try:
            dtlb_total = self.DTLB_Load.compute(EV)
            walk_part = self.Load_STLB_Miss.compute(EV)
            self.val = dtlb_total - walk_part
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Hit zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the (first level) DTLB was missed by load accesses, that
later on hit in second-level TLB (STLB)"""
class Load_STLB_Miss:
    """TMA tree node 'Load_STLB_Miss' (Clocks_Calculated domain, area BE/Mem, level 5); human-readable description in `desc`."""
    name = "Load_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (load page-walk active cycles per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Miss zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles where the
Second-level TLB (STLB) was missed by load accesses,
performing a hardware page walk"""
class Store_Fwd_Blk:
    """TMA tree node 'Store_Fwd_Blk' (Clocks_Estimated domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (store-forward blocks x 13-cycle assumed penalty, per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles when the
memory subsystem had loads blocked since they could not
forward data from earlier (in program order) overlapping
stores. To streamline memory operations in the pipeline; a
load can avoid waiting for memory if a prior in-flight store
is writing the data that the load wants to read (store
forwarding process). However; in some cases the load may be
blocked for a significant time pending the store forward.
For example; when the prior store is writing a smaller
region than the load is reading."""
class L1_Hit_Latency:
    """TMA tree node 'L1_Hit_Latency' (Clocks_Estimated domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "L1_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (estimated L1-hit load latency cycles per clock, capped by non-L1D-miss memory stall cycles) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = min(2 *(EV("MEM_INST_RETIRED.ALL_LOADS", 4) - EV("MEM_LOAD_RETIRED.FB_HIT", 4) - EV("MEM_LOAD_RETIRED.L1_MISS", 4)) * Dependent_Loads_Weight(self, EV, 4) / 100 , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("MEMORY_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Hit_Latency zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles with demand
load accesses that hit the L1 cache. The short latency of
the L1 data cache may be exposed in pointer-chasing memory
access patterns as an example."""
class Lock_Latency:
    """TMA tree node 'Lock_Latency' (Clocks domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.LOCK_LOADS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (locked-access latency cycles per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (16 * max(0 , EV("MEM_INST_RETIRED.LOCK_LOADS", 4) - EV("L2_RQSTS.ALL_RFO", 4)) + Mem_Lock_St_Fraction(self, EV, 4) * (Mem_L2_Store_Cost * EV("L2_RQSTS.RFO_HIT", 4) + ORO_Demand_RFO_C1(self, EV, 4))) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU spent
handling cache misses due to lock operations. Due to the
microarchitecture handling of locks; they are classified as
L1_Bound regardless of what memory source satisfied them."""
class Split_Loads:
    """TMA tree node 'Split_Loads' (Clocks_Calculated domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (split-register-blocked loads weighted by load miss latency, per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles handling memory
load split accesses - load that cross 64-byte cache line
boundary. . Consider aligning data or hot structure fields.
See the Optimization Manual for more details"""
class FB_Full:
    """TMA tree node 'FB_Full' (Clocks_Calculated domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (fill-buffer-full cycles per clock; thresh not gated on parent here) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = EV("L1D_PEND_MISS.FB_FULL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val
    desc = """
This metric does a *rough estimation* of how often L1D Fill
Buffer unavailability limited additional L1D miss memory
access requests to proceed. The higher the metric value; the
deeper the memory hierarchy level the misses are satisfied
from (metric values >1 are valid). Often it hints on
approaching bandwidth limits (to L2 cache; L3 cache or
external memory).. See $issueBW and $issueSL hints. Avoid
software prefetches if indeed memory BW limited."""
class L2_Bound:
    """TMA tree node 'L2_Bound' (Stalls domain, area BE/Mem, level 3); human-readable description in `desc`."""
    name = "L2_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L2_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (stall cycles between L1D miss and L2 miss, per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (EV("MEMORY_ACTIVITY.STALLS_L1D_MISS", 3) - EV("MEMORY_ACTIVITY.STALLS_L2_MISS", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L2_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
L2 cache accesses by loads. Avoiding cache misses (i.e. L1
misses/L2 hits) can improve the latency and increase
performance."""
class L3_Bound:
    """TMA tree node 'L3_Bound' (Stalls domain, area BE/Mem, level 3); human-readable description in `desc`."""
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (stall cycles between L2 miss and L3 miss, per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (EV("MEMORY_ACTIVITY.STALLS_L2_MISS", 3) - EV("MEMORY_ACTIVITY.STALLS_L3_MISS", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
loads accesses to L3 cache or contended with a sibling Core.
Avoiding cache misses (i.e. L2 misses/L3 hits) can improve
the latency and increase performance."""
class Contested_Accesses:
    """TMA tree node 'Contested_Accesses' (Clocks_Estimated domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD', 'MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (cross-snoop HitM/miss load costs over L2-hit baseline, per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = ((Mem_XSNP_HitM_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HITM(self, EV, 4) + (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_MISS(self, EV, 4)) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to contested
accesses. Contested accesses occur when data written by one
Logical Processor are read by another Logical Processor on a
different Physical Core. Examples of contested accesses
include synchronizations such as locks; true data sharing
such as modified locked variables; and false sharing."""
class Data_Sharing:
    """TMA tree node 'Data_Sharing' (Clocks_Estimated domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (clean cross-snoop hit load costs over L2-hit baseline, per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_XSNP_HIT(self, EV, 4) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to data-sharing
accesses. Data shared by multiple Logical Processors (even
just read shared) may cause increased access latency due to
cache coherency. Excessive data sharing can drastically harm
multithreaded performance."""
class L3_Hit_Latency:
    """TMA tree node 'L3_Hit_Latency' (Clocks_Estimated domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (L3-hit load latency over L2-hit baseline, per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (Mem_XSNP_None_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles with demand load
accesses that hit the L3 cache under unloaded scenarios
(possibly L3 latency limited). Avoiding private cache
misses (i.e. L2 misses/L3 hits) will improve the latency;
reduce contention with sibling physical cores and increase
performance. Note the value of this node may overlap with
its siblings."""
class SQ_Full:
    """TMA tree node 'SQ_Full' (Clocks domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (XQ-full plus L2-stall cycles per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (EV("XQ.FULL_CYCLES", 4) + EV("L1D_PEND_MISS.L2_STALLS", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """
This metric measures fraction of cycles where the Super
Queue (SQ) was full taking into account all request-types
and both hardware SMT threads (Logical Processors)."""
class DRAM_Bound:
    """TMA tree node 'DRAM_Bound' (Stalls domain, area BE/Mem, level 3); human-readable description in `desc`."""
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val via the shared MEM_Bound_Ratio helper and self.thresh; returns self.val."""
        # NOTE(review): the helper presumably issues the EV() calls for this node -- keep call intact; confirm.
        try:
            self.val = MEM_Bound_Ratio(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled on
accesses to external memory (DRAM) by loads. Better caching
can improve the latency and increase performance."""
class MEM_Bandwidth:
    """TMA tree node 'MEM_Bandwidth' (Clocks domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (outstanding demand-read bandwidth cycles per clock) and self.thresh; returns self.val."""
        # NOTE(review): the helper presumably issues the EV() calls for this node -- keep call intact; confirm.
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the core's
performance was likely hurt due to approaching bandwidth
limits of external memory - DRAM ([SPR-HBM] and/or HBM).
The underlying heuristic assumes that a similar off-core
traffic is generated by all IA cores. This metric does not
aggregate non-data-read requests by this logical processor;
requests from other IA Logical Processors/Physical
Cores/sockets; or other non-IA devices like GPU; hence the
maximum external memory bandwidth limits may or may not be
approached when this metric is flagged (see Uncore counters
for that).. Improve data accesses to reduce cacheline
transfers from/to memory. Examples: 1) Consume all bytes of
a each cacheline before it is evicted (e.g. reorder
structure elements and split non-hot ones), 2) merge
computed-limited with BW-limited loops, 3) NUMA
optimizations in multi-socket system. Note: software
prefetches will not help BW-limited application.."""
class MBA_Stalls:
    """TMA tree node 'MBA_Stalls' (Clocks domain, area BE/Mem, level 5); human-readable description in `desc`."""
    name = "MBA_Stalls"
    domain = "Clocks"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBW', 'Offcore', 'Server'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (memory-bandwidth-allocation stall cycles per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = EV("INT_MISC.MBA_STALLS", 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MBA_Stalls zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the core's
performance was likely hurt due to memory bandwidth
Allocation feature (RDT's memory bandwidth throttling)."""
class MEM_Latency:
    """TMA tree node 'MEM_Latency' (Clocks domain, area BE/Mem, level 4); human-readable description in `desc`."""
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        """Compute self.val (outstanding demand-read cycles per clock minus the MEM_Bandwidth sibling) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the
performance was likely hurt due to latency from external
memory - DRAM ([SPR-HBM] and/or HBM). This metric does not
aggregate requests from other Logical Processors/Physical
Cores/sockets (see Uncore counters for that).. Improve data
accesses or interleave them with compute. Examples: 1) Data
layout re-structuring, 2) Software Prefetches (also through
the compiler).."""
class Local_MEM:
    """TMA tree node 'Local_MEM' (Clocks_Estimated domain, area BE/Mem, level 5); human-readable description in `desc`."""
    name = "Local_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (local-DRAM load latency over no-snoop baseline, per clock) and self.thresh; returns self.val."""
        # NOTE(review): EV() likely registers events as a side effect in toplev -- keep calls intact; confirm.
        try:
            self.val = (Mem_Local_DRAM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM", 5) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Local_MEM zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from local memory. Caching will
improve the latency and increase performance."""
class Remote_MEM:
    """TMA tree node 'Remote_MEM' (Clocks_Estimated domain, area BE/Mem, level 5); human-readable description in `desc`."""
    name = "Remote_MEM"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        """Compute self.val (remote-DRAM load latency over no-snoop baseline, per clock; 0 unless DS) and self.thresh; returns self.val."""
        try:
            self.val = (Mem_Remote_DRAM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", 5) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) if DS else 0
            # NOTE(review): the bare EV() call below looks intentional -- presumably it forces the
            # event to be collected even when DS is false (EV registering events as a side
            # effect); do not remove. Confirm against the toplev framework.
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_MEM zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from remote memory. This is
caused often due to non-optimal NUMA allocations."""
class Remote_Cache:
    # TMA node (Clocks_Estimated, level 5): estimated cycles on loads served
    # from a remote socket's cache (HitM + forwarded lines); zero when DS is off.
    name = "Remote_Cache"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = ['MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM:pp', 'MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore', 'Server', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Sum of per-event (remote cost minus no-snoop baseline) terms,
            # weighted by FB_Factor and normalized to clocks.
            self.val = ((Mem_Remote_HitM_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", 5) + (Mem_Remote_Fwd_Cost(self, EV, 5) - Mem_XSNP_None_Cost(self, EV, 5)) * EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", 5)) * FB_Factor(self, EV, 5) / CLKS(self, EV, 5) if DS else 0
            # Bare EV() calls: presumably keep the events scheduled even when
            # the DS guard short-circuits — verify (generator idiom).
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM", 5)
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD", 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Remote_Cache zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling loads from remote cache in other
sockets including synchronizations issues. This is caused
often due to non-optimal NUMA allocations."""
class PMM_Bound:
    # TMA node (Stalls, level 3): rough estimate of stalls on persistent-memory
    # (PMM/3D-XPoint) loads; only non-zero in PMM App-Direct mode and when PMM
    # load traffic is significant relative to L1 misses.
    name = "PMM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'Server', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Non-DDR fraction of the memory-bound ratio, gated on PMM loads
            # (scaled by OneMillion) outweighing L1 misses, and on App-Direct mode.
            self.val = (((1 - Mem_DDR_Hit_Fraction(self, EV, 3)) * MEM_Bound_Ratio(self, EV, 3)) if (OneMillion *(EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", 3) + EV("MEM_LOAD_RETIRED.LOCAL_PMM", 3))> EV("MEM_LOAD_RETIRED.L1_MISS", 3)) else 0) if PMM_App_Direct else 0
            # Bare EV() calls: presumably keep the events scheduled even when
            # the guards short-circuit — verify (generator idiom).
            EV("MEM_LOAD_RETIRED.LOCAL_PMM", 3)
            EV("MEM_LOAD_RETIRED.L1_MISS", 3)
            EV("MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM", 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "PMM_Bound zero division")
        return self.val
    desc = """
This metric roughly estimates (based on idle latencies) how
often the CPU was stalled on accesses to external 3D-Xpoint
(Crystal Ridge, a.k.a. IXP) memory by loads, PMM stands for
Persistent Memory Module. . Consider moving data-structure
from AEP to DDR memory for better latency/bandwidth."""
class Store_Bound:
    # TMA node (Stalls, level 3): fraction of cycles the execution pipeline
    # was bound on stores (EXE_ACTIVITY.BOUND_ON_STORES / clocks).
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_INST_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.BOUND_ON_STORES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to RFO
store memory accesses; RFO store issue a read-for-ownership
request before the write. Even though store accesses do not
typically stall out-of-order CPUs; there are few cases where
stores can lead to actual stalls. This metric will be
flagged should RFO stores be a bottleneck."""
class Store_Latency:
    # TMA node (Clocks_Estimated, level 4): estimated cycles handling L1D
    # store misses — L2-hit store cycles plus non-locked demand-RFO
    # outstanding cycles, normalized to clocks.
    name = "Store_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU spent
handling L1D store misses. Store accesses usually less
impact out-of-order core performance; however; holding
resources for longer time can lead into undesired
implications (e.g. contention on L1D fill-buffer entries -
see FB_Full). Consider to avoid/reduce unnecessary (or
easily load-able/computable) memory store."""
class False_Sharing:
    # TMA node (Clocks_Estimated, level 4): rough false-sharing cost —
    # per-event snoop-HitM cost times demand-RFO L3-hit-HitM count.
    name = "False_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "False_Sharing zero division")
        return self.val
    desc = """
This metric roughly estimates how often CPU was handling
synchronizations due to False Sharing. False Sharing is a
multithreading hiccup; where multiple Logical Processors
contend on different data-elements mapped into the same
cache line. . False Sharing can be easily avoided by padding
to make Logical Processors access different lines."""
class Split_Stores:
    # TMA node (Core_Utilization, level 4): rate of cache-line-splitting
    # stores per core clock.
    name = "Split_Stores"
    domain = "Core_Utilization"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("MEM_INST_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Stores zero division")
        return self.val
    desc = """
This metric represents rate of split store accesses.
Consider aligning your data to the 64-byte cache line
granularity."""
class Streaming_Stores:
    # TMA node (Clocks_Estimated, level 4): estimated streaming-store cost;
    # fixed 9-cycle weight per streaming-write response. Zero when DS is off.
    name = "Streaming_Stores"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.STREAMING_WR.ANY_RESPONSE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBW', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # 9 is a fixed estimated per-event penalty (cycles) — source of the
            # constant not visible here.
            self.val = 9 * EV("OCR.STREAMING_WR.ANY_RESPONSE", 4) / CLKS(self, EV, 4) if DS else 0
            # Bare EV() call: presumably keeps the event scheduled even when
            # the DS guard short-circuits — verify (generator idiom).
            EV("OCR.STREAMING_WR.ANY_RESPONSE", 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Streaming_Stores zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to
Streaming store memory accesses; Streaming store optimize
out a read request required by RFO stores. Even though store
accesses do not typically stall out-of-order CPUs; there are
few cases where stores can lead to actual stalls. This
metric will be flagged should Streaming stores be a
bottleneck."""
class DTLB_Store:
    # TMA node (Clocks_Estimated, level 4): first-level data-TLB store-miss
    # cycles — STLB hits at a fixed per-hit cost plus active page-walk cycles.
    name = "DTLB_Store"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT:c1", 4) + EV("DTLB_STORE_MISSES.WALK_ACTIVE", 4)) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Store zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles spent
handling first-level data TLB store misses.  As with
ordinary data caching; focus on improving data locality and
reducing working-set size to reduce DTLB overhead.
Additionally; consider using profile-guided optimization
(PGO) to collocate frequently-used data on the same page.
Try using larger page sizes for large amounts of frequently-
used data."""
class Store_STLB_Hit:
    # TMA node (Clocks_Estimated, level 5): store DTLB misses that hit the
    # STLB — parent DTLB_Store minus the sibling Store_STLB_Miss.
    name = "Store_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = self.DTLB_Store.compute(EV) - self.Store_STLB_Miss.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Hit zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the TLB was missed by store accesses, hitting in the second-
level TLB (STLB)"""
class Store_STLB_Miss:
    # TMA node (Clocks_Calculated, level 5): cycles with an active hardware
    # page walk triggered by store STLB misses.
    name = "Store_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("DTLB_STORE_MISSES.WALK_ACTIVE", 5) / CORE_CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Miss zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles where the STLB
was missed by store accesses, performing a hardware page
walk"""
class Core_Bound:
    # TMA node (Slots, level 2): Core-Bound = Backend_Bound minus
    # Memory_Bound, clamped at zero.
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots where Core non-
memory issues were of a bottleneck.  Shortage in hardware
compute resources; or dependencies in software's
instructions are both categorized under Core Bound. Hence it
may indicate the machine ran out of an out-of-order
resource; certain execution units are overloaded or
dependencies in program's data- or instruction-flow are
limiting the performance (e.g. FP-chained long-latency
arithmetic operations).. Tip: consider Port Saturation
analysis as next step."""
class Divider:
    # TMA node (Clocks, level 3): fraction of cycles the divide unit was busy.
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    # NOTE(review): sample lists ARITH.DIVIDER_ACTIVE while compute() reads
    # ARITH.DIV_ACTIVE — confirm this name difference is intentional.
    sample = ['ARITH.DIVIDER_ACTIVE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("ARITH.DIV_ACTIVE", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the Divider
unit was active. Divide and square root instructions are
performed by the Divider unit and can take considerably
longer latency than integer or Floating Point addition;
subtraction; or multiplication."""
class Serializing_Operation:
    # TMA node (Clocks, level 3): issue-pipeline stalls from serializing
    # operations — scoreboard stalls plus the C0.2-wait sibling node.
    name = "Serializing_Operation"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['RESOURCE_STALLS.SCOREBOARD']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("RESOURCE_STALLS.SCOREBOARD", 3) / CLKS(self, EV, 3) + self.C02_WAIT.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Serializing_Operation zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU issue-
pipeline was stalled due to serializing operations.
Instructions like CPUID; WRMSR or LFENCE serialize the out-
of-order execution which may limit performance."""
class Slow_Pause:
    # TMA node (Clocks, level 4): cycles stalled on PAUSE instructions.
    name = "Slow_Pause"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    # NOTE(review): sample lists CPU_CLK_UNHALTED.PAUSE_INST while compute()
    # reads CPU_CLK_UNHALTED.PAUSE — confirm this difference is intentional.
    sample = ['CPU_CLK_UNHALTED.PAUSE_INST']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.PAUSE", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Slow_Pause zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to PAUSE Instructions."""
class C01_WAIT:
    # TMA node (Clocks, level 4): cycles spent in the C0.1 power-optimized
    # wait state.
    name = "C01_WAIT"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['C0Wait'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.C01", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "C01_WAIT zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due staying in C0.1 power-performance optimized
state (Faster wakeup time; Smaller power savings)."""
class C02_WAIT:
    # TMA node (Clocks, level 4): cycles spent in the C0.2 power-optimized
    # wait state.
    name = "C02_WAIT"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['C0Wait'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.C02", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "C02_WAIT zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due staying in C0.2 power-performance optimized
state (Slower wakeup time; Larger power savings)."""
class Memory_Fence:
    # TMA node (Clocks, level 4): LFENCE stall cycles, using a fixed 13-cycle
    # weight per retired LFENCE.
    name = "Memory_Fence"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            # 13 is a fixed estimated per-LFENCE penalty (cycles).
            self.val = 13 * EV("MISC2_RETIRED.LFENCE", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Fence zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to LFENCE Instructions."""
class AMX_Busy:
    # TMA node (Core_Clocks, level 3): core-cycle fraction the AMX tile
    # engine was busy.
    name = "AMX_Busy"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'Compute', 'HPC', 'Server'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE.AMX_BUSY", 3) / CORE_CLKS(self, EV, 3)
            self.thresh = (self.val > 0.5) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "AMX_Busy zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the Advanced
Matrix eXtensions (AMX) execution engine was busy with tile
(arithmetic) operations"""
class Ports_Utilization:
    # TMA node (Clocks, level 3): core compute-port pressure. Picks between
    # two formulas depending on whether divider activity exceeds the
    # non-load execution stalls.
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Core_Bound_Cycles(self, EV, 3) / CLKS(self, EV, 3) if (EV("ARITH.DIV_ACTIVE", 3)<(EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) - EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3))) else Few_Uops_Executed_Threshold(self, EV, 3) / CLKS(self, EV, 3)
            # Bare EV() calls: presumably keep the events scheduled even when
            # the conditional above skips them — verify (generator idiom).
            EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3)
            EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3)
            EV("ARITH.DIV_ACTIVE", 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU performance
was potentially limited due to Core computation issues (non
divider-related).  Two distinct categories can be attributed
into this metric: (1) heavy data-dependency among contiguous
instructions would manifest in this metric - such cases are
often referred to as low Instruction Level Parallelism
(ILP). (2) Contention on some hardware execution unit other
than Divider. For example; when there are too many multiply
operations.. Loop Vectorization -most compilers feature
auto-Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
class Ports_Utilized_0:
    # TMA node (Clocks, level 4): cycles where zero uops executed on any
    # port — zero-port-bound cycles plus reservation-station-empty cycles
    # (net of scoreboard stalls), scaled by the non-load stall fraction.
    name = "Ports_Utilized_0"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("EXE_ACTIVITY.EXE_BOUND_0_PORTS", 4) + max(EV("RS.EMPTY:u1", 4) - EV("RESOURCE_STALLS.SCOREBOARD", 4) , 0)) / CLKS(self, EV, 4) * (EV("CYCLE_ACTIVITY.STALLS_TOTAL", 4) - EV("EXE_ACTIVITY.BOUND_ON_LOADS", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_0 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed no
uops on any execution port (Logical Processor cycles since
ICL, Physical Core cycles otherwise). Long-latency
instructions like divides may contribute to this metric..
Check assembly view and Appendix C in Optimization Manual to
find out instructions with say 5 or more cycles latency..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Mixing_Vectors:
    # TMA node (Clocks, level 5): SSE/AVX transition penalty, using a fixed
    # 160-cycle weight per SSE_AVX_MIX assist.
    name = "Mixing_Vectors"
    domain = "Clocks"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            # 160 is a fixed estimated per-assist penalty (cycles).
            self.val = 160 * EV("ASSISTS.SSE_AVX_MIX", 5) / CLKS(self, EV, 5)
            # Unlike most siblings, not gated on self.parent.thresh.
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error(self, "Mixing_Vectors zero division")
        return self.val
    desc = """
This metric estimates penalty in terms of percentage of
cycles. Usually a Mixing_Vectors over 5% is worth
investigating. Read more in Appendix B1 of the Optimizations
Guide for this topic."""
class Ports_Utilized_1:
    # TMA node (Clocks, level 4): cycles where exactly one uop executed
    # across all ports.
    name = "Ports_Utilized_1"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.1_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.1_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_1 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the CPU
executed total of 1 uop per cycle on all execution ports
(Logical Processor cycles since ICL, Physical Core cycles
otherwise). This can be due to heavy data-dependency among
software instructions; or over oversubscribing a particular
hardware resource. In some other cases with high
1_Port_Utilized and L1_Bound; this metric can point to L1
data-cache latency bottleneck that may not necessarily
manifest with complete execution starvation (due to the
short L1 latency e.g. walking a linked list) - looking at
the assembly can be helpful."""
class Ports_Utilized_2:
    # TMA node (Clocks, level 4): cycles where exactly two uops executed
    # across all ports.
    name = "Ports_Utilized_2"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.2_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.2_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_2 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total
of 2 uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise).
Loop Vectorization -most compilers feature auto-
Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
class Ports_Utilized_3m:
    # TMA node (Clocks, level 4): cycles with three or more uops executed
    # across all ports.
    name = "Ports_Utilized_3m"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['UOPS_EXECUTED.CYCLES_GE_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_EXECUTED.CYCLES_GE_3", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.4) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_3m zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total
of 3 or more uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise)."""
class ALU_Op_Utilization:
    # TMA node (Core_Execution, level 5): aggregate ALU-port utilization —
    # uops dispatched to ports 0, 1, 5_11 and 6, over 5 port-slots per
    # core clock.
    name = "ALU_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_0", 5) + EV("UOPS_DISPATCHED.PORT_1", 5) + EV("UOPS_DISPATCHED.PORT_5_11", 5) + EV("UOPS_DISPATCHED.PORT_6", 5)) / (5 * CORE_CLKS(self, EV, 5))
            # Not gated on self.parent.thresh.
            self.thresh = (self.val > 0.4)
        except ZeroDivisionError:
            handle_error(self, "ALU_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution ports for ALU operations."""
class Port_0:
    # TMA node (Core_Clocks, level 6): utilization of execution port 0
    # (ALU / 2nd branch).
    name = "Port_0"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_0']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_0", 6) / CORE_CLKS(self, EV, 6)
            # Not gated on self.parent.thresh.
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_0 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 0 ALU and 2nd branch"""
class Port_1:
    # TMA node (Core_Clocks, level 6): utilization of execution port 1 (ALU).
    name = "Port_1"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_1']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_1", 6) / CORE_CLKS(self, EV, 6)
            # Not gated on self.parent.thresh.
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_1 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 1 (ALU)"""
class Port_6:
    # TMA node (Core_Clocks, level 6): utilization of execution port 6
    # (primary branch / simple ALU).
    name = "Port_6"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_6']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_6", 6) / CORE_CLKS(self, EV, 6)
            # Not gated on self.parent.thresh.
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_6 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 6 Primary Branch and
simple ALU"""
class Load_Op_Utilization:
    # TMA node (Core_Execution, level 5): load-port utilization — uops on
    # ports 2/3/10 over 3 port-slots per core clock.
    name = "Load_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_2_3_10']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_2_3_10", 5) / (3 * CORE_CLKS(self, EV, 5))
            # Not gated on self.parent.thresh.
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Load_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Load operations"""
class Store_Op_Utilization:
    # TMA node (Core_Execution, level 5): store-port utilization — uops on
    # ports 4/9 and 7/8 over 4 port-slots per core clock.
    name = "Store_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_7_8']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_4_9", 5) + EV("UOPS_DISPATCHED.PORT_7_8", 5)) / (4 * CORE_CLKS(self, EV, 5))
            # Not gated on self.parent.thresh.
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Store_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Store operations"""
class Retiring:
    # TMA top-level node (Slots, level 1): fraction of pipeline slots doing
    # useful retired work. Uses the fixed PERF_METRICS MSR path when
    # topdown_use_fixed is set, otherwise the generic-counter formula.
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("PERF_METRICS.RETIRING", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) if topdown_use_fixed else EV("UOPS_RETIRED.SLOTS", 1) / SLOTS(self, EV, 1)
            # Also flagged when the Heavy_Operations child is flagged.
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """
This category represents fraction of slots utilized by
useful work i.e. issued uops that eventually get retired.
Ideally; all pipeline slots would be attributed to the
Retiring category.  Retiring of 100% would indicate the
maximum Pipeline_Width throughput was achieved.  Maximizing
Retiring typically increases the Instructions-per-cycle (see
IPC metric). Note that a high Retiring value does not
necessary mean there is no room for more performance.  For
example; Heavy-operations or Microcode Assists are
categorized under Retiring. They often indicate suboptimal
performance and can often be optimized or avoided. . A high
Retiring value for non-vectorized code may be a good hint
for programmer to consider vectorizing his code. Doing so
essentially lets more computations be done without
significantly increasing number of instructions thus
improving the performance."""
class Light_Operations:
    # TMA node (Slots, level 2): Retiring minus Heavy_Operations, clamped
    # at zero — slots retiring single-uop instructions.
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV))
            # Not gated on self.parent.thresh.
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring light-weight operations -- instructions that
require no more than one uop (micro-operation). This
correlates with total number of instructions used by the
program. A uops-per-instruction (see UopPI metric) ratio of
1 or less should be expected for decently optimized code
running on Intel Core/Xeon products. While this often
indicates efficient X86 instructions were executed; high
value does not necessarily mean better performance cannot be
achieved. .. Focus on techniques that reduce instruction
count or result in more efficient instructions generation
such as vectorization."""
class FP_Arith:
    # TMA node (Uops, level 3): overall FP-arithmetic uop fraction — sum of
    # the X87, scalar and vector child nodes (may exceed parent due to FMA
    # double-counting, per desc).
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """
This metric represents overall arithmetic floating-point
(FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use of
\"Uops\" CountDomain and FMA double-counting."""
class X87_Use:
    # TMA node (Uops, level 4): approximate legacy x87 usage — Retiring
    # scaled by the x87 share of executed uops.
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Retiring.compute(EV) * EV("UOPS_EXECUTED.X87", 4) / EV("UOPS_EXECUTED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """
This metric serves as an approximation of legacy x87 usage.
It accounts for instructions beyond X87 FP arithmetic
operations; hence may be used as a thermometer to avoid X87
high usage and preferably upgrade to modern ISA. See Tip
under Tuning Hint.. Tip: consider compiler flags to generate
newer AVX (or SSE) instruction sets; which typically perform
better and feature vectors."""
class FP_Scalar:
    # TMA node (Uops, level 4): scalar FP-arithmetic uops as a fraction of
    # retired slots.
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
scalar uops fraction the CPU has retired. May overcount due
to FMA double counting.. Investigate what limits (compiler)
generation of vector code."""
class FP_Vector:
    # TMA node (Uops, level 4): vector FP-arithmetic uops (all widths) as a
    # fraction of retired slots.
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
vector uops fraction the CPU has retired aggregated across
all vector widths. May overcount due to FMA double
counting.. Check if vector width is expected"""
class FP_Vector_128b:
    # TMA node (Uops, level 5): 128-bit packed FP uops fraction; includes
    # the packed-half event only when FP16 support is enabled.
    name = "FP_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5) + EV("FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", 5)) / Retired_Slots(self, EV, 5) if FP16 else(EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            # Bare EV() calls: presumably keep all events scheduled regardless
            # of the FP16 branch taken — verify (generator idiom).
            EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)
            EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5)
            EV("FP_ARITH_INST_RETIRED2.128B_PACKED_HALF", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_128b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 128-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class FP_Vector_256b:
    # TMA node (Uops, level 5): 256-bit packed FP uops fraction; includes
    # the packed-half event only when FP16 support is enabled.
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5) + EV("FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", 5)) / Retired_Slots(self, EV, 5) if FP16 else(EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            # Bare EV() calls: presumably keep all events scheduled regardless
            # of the FP16 branch taken — verify (generator idiom).
            EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)
            EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5)
            EV("FP_ARITH_INST_RETIRED2.256B_PACKED_HALF", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class FP_Vector_512b:
    # TMA node (Uops, level 5): 512-bit packed FP uops fraction; includes
    # the packed-half event only when FP16 support is enabled.
    name = "FP_Vector_512b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", 5) + EV("FP_ARITH_INST_RETIRED2.512B_PACKED_HALF", 5)) / Retired_Slots(self, EV, 5) if FP16 else(EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            # Bare EV() calls: presumably keep all events scheduled regardless
            # of the FP16 branch taken — verify (generator idiom).
            EV("FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", 5)
            EV("FP_ARITH_INST_RETIRED2.512B_PACKED_HALF", 5)
            EV("FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_512b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 512-bit wide vectors. May overcount
due to FMA double counting."""
class Int_Operations:
    # TMA tree node (auto-generated): sum of the 128b and 256b integer-vector
    # child nodes; may exceed its parent because it uses the "Uops" domain.
    name = "Int_Operations"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Int_Vector_128b.compute(EV) + self.Int_Vector_256b.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Int_Operations zero division")
        return self.val
    desc = """
This metric represents overall Integer (Int) select
operations fraction the CPU has executed (retired).
Vector/Matrix Int operations and shuffles are counted. Note
this metric's value may exceed its parent due to use of
\"Uops\" CountDomain."""
class Int_Vector_128b:
    # TMA tree node (auto-generated): retired 128-bit integer vector
    # ADD/SUB/SAD and VNNI uops as a fraction of retired slots.
    name = "Int_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'IntVector', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("INT_VEC_RETIRED.ADD_128", 4) + EV("INT_VEC_RETIRED.VNNI_128", 4)) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Int_Vector_128b zero division")
        return self.val
    desc = """
This metric represents 128-bit vector Integer ADD/SUB/SAD or
VNNI (Vector Neural Network Instructions) uops fraction the
CPU has retired."""
class Int_Vector_256b:
    # TMA tree node (auto-generated): retired 256-bit integer vector
    # ADD/SUB/SAD/MUL and VNNI uops as a fraction of retired slots.
    name = "Int_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'IntVector', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("INT_VEC_RETIRED.ADD_256", 4) + EV("INT_VEC_RETIRED.MUL_256", 4) + EV("INT_VEC_RETIRED.VNNI_256", 4)) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Int_Vector_256b zero division")
        return self.val
    desc = """
This metric represents 256-bit vector Integer
ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions)
uops fraction the CPU has retired."""
class Memory_Operations:
    # TMA tree node (auto-generated): Light_Operations scaled by the share of
    # retired uops that are memory loads/stores.
    name = "Memory_Operations"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("MEM_UOP_RETIRED.ANY", 3) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring memory operations -- uops for memory load or store
accesses."""
class Fused_Instructions:
    # TMA tree node (auto-generated): Light_Operations scaled by the share of
    # retired macro-fused instructions (e.g. CMP+JCC).
    # NOTE(review): the generated desc below contains an unfilled "{}"
    # placeholder -- likely a generator artifact; confirm against the TMA
    # spreadsheet before editing the string (it is runtime output data).
    name = "Fused_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.MACRO_FUSED", 3) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fused_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring fused instructions -- where one uop can represent
multiple contiguous instructions. CMP+JCC or DEC+JCC are
common examples of legacy fusions. {}. See section
'Optimizing for Macro-fusion' in Optimization Manual:"""
class Non_Fused_Branches:
    # TMA tree node (auto-generated): retired branches minus macro-fused
    # instructions, scaled by Light_Operations.
    name = "Non_Fused_Branches"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * (EV("BR_INST_RETIRED.ALL_BRANCHES", 3) - EV("INST_RETIRED.MACRO_FUSED", 3)) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Non_Fused_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring branch instructions that were not fused. Non-
conditional branches like direct JMP or CALL would count
here. Can be used to examine fusible conditional jumps that
were not fused."""
class Other_Light_Ops:
    # TMA tree node (auto-generated): residual of Light_Operations not covered
    # by sibling nodes; clamped at zero because the subtraction can go negative.
    name = "Other_Light_Ops"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Light_Operations.compute(EV) - Light_Ops_Sum(self, EV, 3))
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Light_Ops zero division")
        return self.val
    desc = """
This metric represents the remaining light uops fraction the
CPU has executed - remaining means not covered by other
sibling nodes. May undercount due to FMA double counting"""
class Nop_Instructions:
    # TMA tree node (auto-generated): Light_Operations scaled by the share of
    # retired NOP instructions (often compiler alignment padding).
    name = "Nop_Instructions"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = ['INST_RETIRED.NOP']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.NOP", 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Nop_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring NOP (no op) instructions. Compilers often use NOPs
for certain address alignments - e.g. start address of a
function or loop body.. Improve Codegen by correctly placing
NOPs outside hot sections (e.g. outside loop body)."""
class Shuffles_256b:
    # TMA tree node (auto-generated): Light_Operations scaled by the share of
    # retired 256-bit vector shuffle uops.
    name = "Shuffles_256b"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INT_VEC_RETIRED.SHUFFLES", 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Shuffles_256b zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring Shuffle operations of 256-bit vector size (FP or
Integer). Shuffles may incur slow cross \"vector lane\" data
transfers."""
class Heavy_Operations:
    # TMA tree node (auto-generated), level-2: slots retiring multi-uop or
    # microcoded sequences.  Uses the fixed PERF_METRICS MSR path when the
    # module-level topdown_use_fixed flag is set, otherwise a generic event.
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['UOPS_RETIRED.HEAVY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("PERF_METRICS.HEAVY_OPERATIONS", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("UOPS_RETIRED.HEAVY", 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that
require two or more uops or micro-coded sequences. This
highly-correlates with the uop length of these
instructions/sequences. ."""
class Few_Uops_Instructions:
    # TMA tree node (auto-generated): Heavy_Operations minus the
    # Microcode_Sequencer portion, clamped at zero.
    # NOTE(review): the desc string below carries typos from the generated
    # source data ("that that are decoder"); it is runtime output, left as-is.
    name = "Few_Uops_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Heavy_Operations.compute(EV) - self.Microcode_Sequencer.compute(EV))
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Few_Uops_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring instructions that that are decoder into two or up
to five uops. This highly-correlates with the number of
uops in such instructions."""
class Microcode_Sequencer:
    # TMA tree node (auto-generated): slots retiring uops fetched from the
    # Microcode Sequencer (MS) unit.
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = ['UOPS_RETIRED.MS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_RETIRED.MS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was
retiring uops fetched by the Microcode Sequencer (MS) unit.
The MS is used for CISC instructions not supported by the
default decoders (like repeat move strings; or CPUID); or by
microcode assists used to address some operation modes (like
in Floating Point assists). These cases can often be
avoided.."""
class Assists:
    # TMA tree node (auto-generated): estimated slots lost to microcode
    # assists; Avg_Assist_Cost is a module-level per-assist cost constant.
    name = "Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 4
    htoff = False
    sample = ['ASSISTS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Avg_Assist_Cost * EV("ASSISTS.ANY", 4) / SLOTS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops
delivered by the Microcode_Sequencer as a result of Assists.
Assists are long sequences of uops that are required in
certain corner-cases for operations that cannot be handled
natively by the execution pipeline. For example; when
working with very small floating point values (so-called
Denormals); the FP units are not set up to perform these
operations natively. Instead; a sequence of instructions to
perform the computation on the Denormals is injected into
the pipeline. Since these microcode sequences might be
dozens of uops long; Assists can be extremely deleterious to
performance and they can be avoided in many cases."""
class Page_Faults:
    # TMA tree node (auto-generated): rough slots cost of page-fault assists
    # (99 = assumed per-fault uop cost from the TMA model).
    name = "Page_Faults"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 99 * EV("ASSISTS.PAGE_FAULT", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error(self, "Page_Faults zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of slots the CPU
retired uops as a result of handing Page Faults. A Page
Fault may apply on first application access to a memory
page. Note operating system handling of page faults accounts
for the majority of its cost."""
class FP_Assists:
    # TMA tree node (auto-generated): rough slots cost of FP (denormal)
    # assists (30 = assumed per-assist uop cost from the TMA model).
    name = "FP_Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 30 * EV("ASSISTS.FP", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "FP_Assists zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of slots the CPU
retired uops as a result of handing Floating Point (FP)
Assists. FP Assist may apply when working with very small
floating point values (so-called Denormals).. Consider DAZ
(Denormals Are Zero) and/or FTZ (Flush To Zero) options in
your compiler; \"-ffast-math\" with -O2 in GCC for example.
This option may improve performance if the denormal values
are not critical in your application. Also note that the DAZ
and FTZ modes are not compatible with the IEEE Standard
754.. https://www.intel.com/content/www/us/en/develop/docume
ntation/vtune-help/top/reference/cpu-metrics-reference/bad-
speculation-back-end-bound-pipeline-slots/fp-assists.html"""
class AVX_Assists:
    # TMA tree node (auto-generated): estimated slots cost of SSE<->AVX
    # transition assists (63 = assumed per-assist uop cost).
    name = "AVX_Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 63 * EV("ASSISTS.SSE_AVX_MIX", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "AVX_Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops
as a result of handing SSE to AVX* or AVX* to SSE transition
Assists."""
class CISC:
    # TMA tree node (auto-generated): Microcode_Sequencer minus the Assists
    # portion, clamped at zero -- i.e. MS uops from genuine CISC instructions.
    name = "CISC"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = ['FRONTEND_RETIRED.MS_FLOWS']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "CISC zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU retired
uops originated from CISC (complex instruction set computer)
instruction. A CISC instruction has multiple uops that are
required to perform the instruction's functionality as in
the case of read-modify-write as an example. Since these
instructions require multiple uops they may or may not imply
sub-optimal use of machine resources."""
class Metric_Mispredictions:
    # Info/bottleneck metric (auto-generated): delegates to the module-level
    # Mispredictions() helper; unlike tree nodes, compute() returns nothing.
    name = "Mispredictions"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts', 'BvMP'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Mispredictions(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Mispredictions zero division")
    desc = """
Total pipeline cost of Branch Misprediction related
bottlenecks"""
class Metric_Big_Code:
    # Info/bottleneck metric (auto-generated): wraps the Big_Code() helper.
    name = "Big_Code"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBC', 'BigFootprint', 'Fed', 'Frontend', 'IcMiss', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Big_Code(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Big_Code zero division")
    desc = """
Total pipeline cost of instruction fetch related bottlenecks
by large code footprint programs (i-side cache; TLB and BTB
misses)"""
class Metric_Instruction_Fetch_BW:
    # Info/bottleneck metric (auto-generated): wraps Instruction_Fetch_BW().
    name = "Instruction_Fetch_BW"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvFB', 'Fed', 'FetchBW', 'Frontend'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instruction_Fetch_BW(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Instruction_Fetch_BW zero division")
    desc = """
Total pipeline cost of instruction fetch bandwidth related
bottlenecks (when the front-end could not sustain operations
delivery to the back-end)"""
class Metric_Cache_Memory_Bandwidth:
    # Info/bottleneck metric (auto-generated): wraps Cache_Memory_Bandwidth().
    name = "Cache_Memory_Bandwidth"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMB', 'Mem', 'MemoryBW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cache_Memory_Bandwidth(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Bandwidth zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Bandwidth
related bottlenecks"""
class Metric_Cache_Memory_Latency:
    # Info/bottleneck metric (auto-generated): wraps Cache_Memory_Latency().
    name = "Cache_Memory_Latency"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvML', 'Mem', 'MemoryLat', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cache_Memory_Latency(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Latency zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Latency
related bottlenecks"""
class Metric_Memory_Data_TLBs:
    # Info/bottleneck metric (auto-generated): wraps Memory_Data_TLBs().
    name = "Memory_Data_TLBs"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMT', 'Mem', 'MemoryTLB', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Memory_Data_TLBs(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Data_TLBs zero division")
    desc = """
Total pipeline cost of Memory Address Translation related
bottlenecks (data-side TLBs)"""
class Metric_Memory_Synchronization:
    # Info/bottleneck metric (auto-generated): wraps Memory_Synchronization();
    # note its threshold (10) is lower than most sibling bottleneck metrics.
    name = "Memory_Synchronization"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMS', 'Mem', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Memory_Synchronization(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Synchronization zero division")
    desc = """
Total pipeline cost of Memory Synchronization related
bottlenecks (data transfers and coherency updates across
processors)"""
class Metric_Compute_Bound_Est:
    # Info/bottleneck metric (auto-generated): wraps Compute_Bound_Est().
    name = "Compute_Bound_Est"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvCB', 'Cor'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Compute_Bound_Est(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Compute_Bound_Est zero division")
    desc = """
Total pipeline cost when the execution is compute-bound - an
estimation. Covers Core Bound when High ILP as well as when
long-latency execution units are busy."""
class Metric_Irregular_Overhead:
    # Info/bottleneck metric (auto-generated): wraps Irregular_Overhead().
    name = "Irregular_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BvIO', 'Cor', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Irregular_Overhead(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Irregular_Overhead zero division")
    desc = """
Total pipeline cost of irregular execution (e.g. FP-assists
in HPC, Wait time with work imbalance multithreaded
workloads, overhead in system services or virtualized
environments)"""
class Metric_Other_Bottlenecks:
    # Info/bottleneck metric (auto-generated): wraps Other_Bottlenecks().
    name = "Other_Bottlenecks"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvOB', 'Cor', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Other_Bottlenecks(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Other_Bottlenecks zero division")
    desc = """
Total pipeline cost of remaining bottlenecks in the back-
end. Examples include data-dependencies (Core Bound when Low
ILP) and other unlisted memory-related stalls."""
class Metric_Branching_Overhead:
    # Info/bottleneck metric (auto-generated): wraps Branching_Overhead();
    # lower flag threshold (5) than the other bottleneck metrics.
    name = "Branching_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBO', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Branching_Overhead(self, EV, 0)
            self.thresh = (self.val > 5)
        except ZeroDivisionError:
            handle_error_metric(self, "Branching_Overhead zero division")
    desc = """
Total pipeline cost of instructions used for program
control-flow - a subset of the Retiring category in TMA.
Examples include function calls; loops and alignments. (A
lower bound). Consider Loop Unrolling or function inlining
optimizations"""
class Metric_Useful_Work:
    # Info/bottleneck metric (auto-generated): wraps Useful_Work().
    name = "Useful_Work"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvUW', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Useful_Work(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Useful_Work zero division")
    desc = """
Total pipeline cost of \"useful operations\" - the portion
of Retiring category not covered by Branching_Overhead nor
Irregular_Overhead."""
class Metric_Core_Bound_Likely:
    # Info metric (auto-generated): probability estimate from the
    # Core_Bound_Likely() helper; value is a 0..1 fraction (maxval = 1.0).
    name = "Core_Bound_Likely"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Botlnk.L0"
    metricgroup = frozenset(['Cor', 'SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Core_Bound_Likely(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Bound_Likely zero division")
    desc = """
Probability of Core Bound bottleneck hidden by SMT-profiling
artifacts. Tip: consider analysis with SMT disabled"""
class Metric_IPC:
    # Info metric (auto-generated): Instructions Per Cycle via the IPC()
    # helper; always displayed (thresh = True).
    name = "IPC"
    domain = "Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Ret', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IPC zero division")
    desc = """
Instructions Per Cycle (per Logical Processor)"""
class Metric_UopPI:
    # Info metric (auto-generated): uops per instruction via UopPI().
    name = "UopPI"
    domain = "Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UopPI(self, EV, 0)
            self.thresh = (self.val > 1.05)
        except ZeroDivisionError:
            handle_error_metric(self, "UopPI zero division")
    desc = """
Uops Per Instruction"""
class Metric_UpTB:
    # Info metric (auto-generated): uops per taken branch via UpTB(); flagged
    # when LOW relative to the machine's pipeline width.
    name = "UpTB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 1.5
        except ZeroDivisionError:
            handle_error_metric(self, "UpTB zero division")
    desc = """
Uops per taken branch"""
class Metric_CPI:
    # Info metric (auto-generated): Cycles Per Instruction via CPI().
    name = "CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPI zero division")
    desc = """
Cycles Per Instruction (per Logical Processor)"""
class Metric_CLKS:
    # Info count (auto-generated): raw active-thread clock count via CLKS().
    name = "CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CLKS zero division")
    desc = """
Per-Logical Processor actual clocks when the Logical
Processor is active."""
class Metric_SLOTS:
    # Info count (auto-generated): total issue-pipeline slots via SLOTS().
    name = "SLOTS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SLOTS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SLOTS zero division")
    desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""
class Metric_Slots_Utilization:
    # Info metric (auto-generated): fraction (0..1) of physical-core slots
    # used by this logical processor, via Slots_Utilization().
    name = "Slots_Utilization"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Slots_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Slots_Utilization zero division")
    desc = """
Fraction of Physical Core issue-slots utilized by this
Logical Processor"""
class Metric_Execute_per_Issue:
    # Info metric (auto-generated): executed/issued uop ratio via
    # Execute_per_Issue().
    name = "Execute_per_Issue"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Cor', 'Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute_per_Issue(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute_per_Issue zero division")
    desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests
high rate of uop micro-fusions. Ratio < 1 suggest high rate
of \"execute\" at rename stage."""
class Metric_CoreIPC:
    # Info metric (auto-generated): per-physical-core IPC via CoreIPC().
    name = "CoreIPC"
    domain = "Core_Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CoreIPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CoreIPC zero division")
    desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""
class Metric_FLOPc:
    # Info metric (auto-generated): FP operations per cycle via FLOPc().
    name = "FLOPc"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'Flops'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FLOPc(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FLOPc zero division")
    desc = """
Floating Point Operations Per Cycle"""
class Metric_FP_Arith_Utilization:
    # Info metric (auto-generated): FP execution-unit utilization via
    # FP_Arith_Utilization(); can exceed 1 (maxval 2.0) due to FMA counting.
    name = "FP_Arith_Utilization"
    domain = "Core_Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FP_Arith_Utilization zero division")
    desc = """
Actual per-core usage of the Floating Point non-X87
execution units (regardless of precision or vector-width).
Values > 1 are possible due to Fused-Multiply Add use all
of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less
common."""
class Metric_ILP:
    # Info metric (auto-generated): avg uops executed when executing, via
    # ILP(); bounded by the machine's execution port count (Exe_Ports).
    name = "ILP"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = ILP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "ILP zero division")
    desc = """
Instruction-Level-Parallelism (average number of uops
executed when there is execution) per thread (logical-
processor)"""
class Metric_EPC:
    # Info metric (auto-generated): uops executed per cycle via EPC().
    name = "EPC"
    domain = "Metric"
    maxval = 20.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = EPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "EPC zero division")
    desc = """
uops Executed per Cycle"""
class Metric_CORE_CLKS:
    # Info count (auto-generated): physical-core clocks via CORE_CLKS().
    name = "CORE_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CORE_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CORE_CLKS zero division")
    desc = """
Core actual clocks when any Logical Processor is active on
the Physical Core"""
class Metric_IpLoad:
    # Instruction-mix metric (auto-generated): instructions per load via
    # IpLoad(); low values flag a high load rate.
    name = "IpLoad"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpLoad(self, EV, 0)
            self.thresh = (self.val < 3)
        except ZeroDivisionError:
            handle_error_metric(self, "IpLoad zero division")
    desc = """
Instructions per Load (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
class Metric_IpStore:
    # Instruction-mix metric (auto-generated): instructions per store via
    # IpStore().
    name = "IpStore"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpStore(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpStore zero division")
    desc = """
Instructions per Store (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
class Metric_IpBranch:
    # Instruction-mix metric (auto-generated): instructions per branch via
    # IpBranch().
    name = "IpBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpBranch(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpBranch zero division")
    desc = """
Instructions per Branch (lower number means higher
occurrence rate)"""
class Metric_IpCall:
    # Instruction-mix metric (auto-generated): instructions per near call via
    # IpCall().
    name = "IpCall"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpCall(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpCall zero division")
    desc = """
Instructions per (near) call (lower number means higher
occurrence rate)"""
class Metric_IpTB:
    # Instruction-mix metric (auto-generated): instructions per taken branch
    # via IpTB(); flagged when low relative to pipeline width.
    name = "IpTB"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 2 + 1
        except ZeroDivisionError:
            handle_error_metric(self, "IpTB zero division")
    desc = """
Instructions per taken branch"""
class Metric_BpTkBranch:
    # Info metric (auto-generated): branch instructions per taken branch via
    # BpTkBranch().
    name = "BpTkBranch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = BpTkBranch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "BpTkBranch zero division")
    desc = """
Branch instructions per taken branch. . Can be used to
approximate PGO-likelihood for non-loopy codes."""
class Metric_IpFLOP:
    # Instruction-mix metric (auto-generated): instructions per FP operation
    # via IpFLOP().
    name = "IpFLOP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpFLOP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFLOP zero division")
    desc = """
Instructions per Floating Point (FP) Operation (lower number
means higher occurrence rate). Reference: Tuning Performance
via Metrics with Expectations.
https://doi.org/10.1109/LCA.2019.2916408"""
class Metric_IpArith:
    # Instruction-mix metric (auto-generated): instructions per FP arithmetic
    # instruction via IpArith().
    name = "IpArith"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith zero division")
    desc = """
Instructions per FP Arithmetic instruction (lower number
means higher occurrence rate). Values < 1 are possible due
to intentional FMA double counting. Approximated prior to
BDW."""
class Metric_IpArith_Scalar_HP:
    # Instruction-mix metric (auto-generated): instructions per scalar
    # half-precision FP instruction via IpArith_Scalar_HP().
    name = "IpArith_Scalar_HP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType', 'Server'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_HP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_HP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Half-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
class Metric_IpArith_Scalar_SP:
    # Instruction-mix metric (auto-generated): instructions per scalar
    # single-precision FP instruction via IpArith_Scalar_SP().
    name = "IpArith_Scalar_SP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_SP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_SP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Single-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
class Metric_IpArith_Scalar_DP:
    # Instruction-mix metric (auto-generated): instructions per scalar
    # double-precision FP instruction via IpArith_Scalar_DP().
    name = "IpArith_Scalar_DP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_DP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_DP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Double-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
class Metric_IpArith_AVX128:
    # Instruction-mix metric (auto-generated): instructions per 128-bit FP
    # vector instruction via IpArith_AVX128().
    name = "IpArith_AVX128"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX128(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX128 zero division")
    desc = """
Instructions per FP Arithmetic AVX/SSE 128-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
class Metric_IpArith_AVX256:
    # Instruction-mix metric (auto-generated): instructions per 256-bit FP
    # vector instruction via IpArith_AVX256().
    name = "IpArith_AVX256"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX256(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX256 zero division")
    desc = """
Instructions per FP Arithmetic AVX* 256-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
class Metric_IpArith_AVX512:
name = "IpArith_AVX512"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
sibling = None
def compute(self, EV):
try:
self.val = IpArith_AVX512(self, EV, 0)
self.thresh = (self.val < 10)
except ZeroDivisionError:
handle_error_metric(self, "IpArith_AVX512 zero division")
desc = """
Instructions per FP Arithmetic AVX 512-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
class Metric_IpPause:
    # Instructions retired per PAUSE instruction (lower = PAUSE occurs more often).
    # NOTE(review): metricgroup ('Flops', 'FpVector', 'InsType') looks
    # copy-pasted from the neighboring IpArith_* metrics — PAUSE is not a
    # floating-point operation. Confirm against the TMA metric-group table
    # and fix in the generator if wrong.
    name = "IpPause"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        # IpPause() is the generated ratio helper defined earlier in this file.
        try:
            self.val = IpPause(self, EV, 0)
            # Informational metric: unconditionally eligible for display.
            self.thresh = True
        except ZeroDivisionError:
            # Zero denominator (no matching samples): record, don't raise.
            handle_error_metric(self, "IpPause zero division")
    desc = """
Instructions per PAUSE (lower number means higher occurrence
rate)"""
class Metric_IpSWPF:
name = "IpSWPF"
domain = "Inst_Metric"
maxval = 1000
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['Prefetches'])
sibling = None
def compute(self, EV):
try:
self.val = IpSWPF(self, EV, 0)
self.thresh = (self.val < 100)
except ZeroDivisionError:
handle_error_metric(self, "IpSWPF zero division")
desc = """
Instructions per Software prefetch instruction (of any type:
NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence
rate)"""
class Metric_Instructions:
    """Total number of retired instructions (summary count metric)."""
    name = "Instructions"
    domain = "Count"
    area = "Info.Inst_Mix"
    maxval = 0
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Summary', 'TmaL1'])
    desc = """
Total number of retired Instructions"""

    def compute(self, EV):
        # Plain count: always eligible for display, so thresh is fixed True.
        try:
            self.val = Instructions(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Instructions zero division")
class Metric_Retire:
name = "Retire"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Pipeline"
metricgroup = frozenset(['Pipeline', 'Ret'])
sibling = None
def compute(self, EV):
try:
self.val = Retire(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Retire zero division")
desc = """
Average number of Uops retired in cycles where at least one
uop has retired."""
class Metric_Strings_Cycles:
name = "Strings_Cycles"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Pipeline"
metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret'])
sibling = None
def compute(self, EV):
try:
self.val = Strings_Cycles(self, EV, 0)
self.thresh = (self.val > 0.1)
except ZeroDivisionError:
handle_error_metric(self, "Strings_Cycles zero division")
desc = """
Estimated fraction of retirement-cycles dealing with repeat
instructions"""
class Metric_IpAssist:
name = "IpAssist"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Pipeline"
metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret', 'Retire'])
sibling = None
def compute(self, EV):
try:
self.val = IpAssist(self, EV, 0)
self.thresh = (self.val < 100000)
except ZeroDivisionError:
handle_error_metric(self, "IpAssist zero division")
desc = """
Instructions per a microcode Assist invocation. See Assists
tree node for details (lower number means higher occurrence
rate)"""
class Metric_Execute:
    name = "Execute"
    domain = "Metric"
    # Upper bound is the execution-port count, a module-level constant
    # (Exe_Ports) defined elsewhere in this generated file — not a literal.
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT'])
    sibling = None
    def compute(self, EV):
        # Execute() is the generated ratio helper defined earlier in the file.
        try:
            self.val = Execute(self, EV, 0)
            # Informational metric: unconditionally eligible for display.
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute zero division")
    # NOTE(review): the generator emitted an empty desc for this metric;
    # see the TMA "Execute" info metric for its intended description.
    desc = """
"""
class Metric_Fetch_LSD:
name = "Fetch_LSD"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Pipeline"
metricgroup = frozenset(['Fed', 'FetchBW'])
sibling = None
def compute(self, EV):
try:
self.val = Fetch_LSD(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Fetch_LSD zero division")
desc = """
Average number of uops fetched from LSD per cycle"""
class Metric_Fetch_DSB:
name = "Fetch_DSB"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Pipeline"
metricgroup = frozenset(['Fed', 'FetchBW'])
sibling = None
def compute(self, EV):
try:
self.val = Fetch_DSB(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Fetch_DSB zero division")
desc = """
Average number of uops fetched from DSB per cycle"""
class Metric_Fetch_MITE:
name = "Fetch_MITE"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Pipeline"
metricgroup = frozenset(['Fed', 'FetchBW'])
sibling = None
def compute(self, EV):
try:
self.val = Fetch_MITE(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Fetch_MITE zero division")
desc = """
Average number of uops fetched from MITE per cycle"""
class Metric_Fetch_UpC:
name = "Fetch_UpC"
domain = "Metric"
maxval = 6.0
errcount = 0
area = "Info.Frontend"
metricgroup = frozenset(['Fed', 'FetchBW'])
sibling = None
def compute(self, EV):
try:
self.val = Fetch_UpC(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Fetch_UpC zero division")
desc = """
Average number of Uops issued by front-end when it issued
something"""
class Metric_DSB_Coverage:
    name = "DSB_Coverage"
    domain = "Metric"
    # Fraction metric: valid range is 0..1.
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSB', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        # DSB_Coverage() is the generated ratio helper defined earlier in
        # this file.
        try:
            self.val = DSB_Coverage(self, EV, 0)
            # Highlight only when coverage is below 70% AND the HighIPC
            # helper (defined elsewhere in this file) also fires — low DSB
            # coverage is only flagged for workloads already retiring fast.
            self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Coverage zero division")
    desc = """
Fraction of Uops delivered by the DSB (aka Decoded ICache;
or Uop Cache). See section 'Decoded ICache' in Optimization
Manual. http://www.intel.com/content/www/us/en/architecture-
and-technology/64-ia-32-architectures-optimization-
manual.html"""
class Metric_Unknown_Branch_Cost:
name = "Unknown_Branch_Cost"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Frontend"
metricgroup = frozenset(['Fed'])
sibling = None
def compute(self, EV):
try:
self.val = Unknown_Branch_Cost(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Unknown_Branch_Cost zero division")
desc = """
Average number of cycles the front-end was delayed due to an
Unknown Branch detection. See Unknown_Branches node."""
class Metric_DSB_Switch_Cost:
name = "DSB_Switch_Cost"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Frontend"
metricgroup = frozenset(['DSBmiss'])
sibling = None
def compute(self, EV):
try:
self.val = DSB_Switch_Cost(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "DSB_Switch_Cost zero division")
desc = """
Average number of cycles of a switch from the DSB fetch-unit
to MITE fetch unit - see DSB_Switches tree node for details."""
class Metric_DSB_Misses:
name = "DSB_Misses"
domain = "Scaled_Slots"
maxval = 0
errcount = 0
area = "Info.Botlnk.L2"
metricgroup = frozenset(['DSBmiss', 'Fed'])
sibling = None
def compute(self, EV):
try:
self.val = DSB_Misses(self, EV, 0)
self.thresh = (self.val > 10)
except ZeroDivisionError:
handle_error_metric(self, "DSB_Misses zero division")
desc = """
Total pipeline cost of DSB (uop cache) misses - subset of
the Instruction_Fetch_BW Bottleneck."""
class Metric_DSB_Bandwidth:
name = "DSB_Bandwidth"
domain = "Scaled_Slots"
maxval = 0
errcount = 0
area = "Info.Botlnk.L2"
metricgroup = frozenset(['DSB', 'FetchBW'])
sibling = None
def compute(self, EV):
try:
self.val = DSB_Bandwidth(self, EV, 0)
self.thresh = (self.val > 10)
except ZeroDivisionError:
handle_error_metric(self, "DSB_Bandwidth zero division")
desc = """
Total pipeline cost of DSB (uop cache) hits - subset of the
Instruction_Fetch_BW Bottleneck."""
class Metric_ICache_Miss_Latency:
name = "ICache_Miss_Latency"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Frontend"
metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss'])
sibling = None
def compute(self, EV):
try:
self.val = ICache_Miss_Latency(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "ICache_Miss_Latency zero division")
desc = """
Average Latency for L1 instruction cache misses"""
class Metric_IC_Misses:
name = "IC_Misses"
domain = "Scaled_Slots"
maxval = 0
errcount = 0
area = "Info.Botlnk.L2"
metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss'])
sibling = None
def compute(self, EV):
try:
self.val = IC_Misses(self, EV, 0)
self.thresh = (self.val > 5)
except ZeroDivisionError:
handle_error_metric(self, "IC_Misses zero division")
desc = """
Total pipeline cost of Instruction Cache misses - subset of
the Big_Code Bottleneck."""
class Metric_IpDSB_Miss_Ret:
name = "IpDSB_Miss_Ret"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Frontend"
metricgroup = frozenset(['DSBmiss', 'Fed'])
sibling = None
def compute(self, EV):
try:
self.val = IpDSB_Miss_Ret(self, EV, 0)
self.thresh = (self.val < 50)
except ZeroDivisionError:
handle_error_metric(self, "IpDSB_Miss_Ret zero division")
desc = """
Instructions per non-speculative DSB miss (lower number
means higher occurrence rate)"""
class Metric_IpUnknown_Branch:
name = "IpUnknown_Branch"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Frontend"
metricgroup = frozenset(['Fed'])
sibling = None
def compute(self, EV):
try:
self.val = IpUnknown_Branch(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "IpUnknown_Branch zero division")
desc = """
Instructions per speculative Unknown Branch Misprediction
(BAClear) (lower number means higher occurrence rate)"""
class Metric_L2MPKI_Code:
name = "L2MPKI_Code"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Frontend"
metricgroup = frozenset(['IcMiss'])
sibling = None
def compute(self, EV):
try:
self.val = L2MPKI_Code(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2MPKI_Code zero division")
desc = """
L2 cache true code cacheline misses per kilo instruction"""
class Metric_L2MPKI_Code_All:
name = "L2MPKI_Code_All"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Frontend"
metricgroup = frozenset(['IcMiss'])
sibling = None
def compute(self, EV):
try:
self.val = L2MPKI_Code_All(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2MPKI_Code_All zero division")
desc = """
L2 cache speculative code cacheline misses per kilo
instruction"""
class Metric_IpMispredict:
name = "IpMispredict"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Bad_Spec"
metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts'])
sibling = None
def compute(self, EV):
try:
self.val = IpMispredict(self, EV, 0)
self.thresh = (self.val < 200)
except ZeroDivisionError:
handle_error_metric(self, "IpMispredict zero division")
desc = """
Number of Instructions per non-speculative Branch
Misprediction (JEClear) (lower number means higher
occurrence rate)"""
class Metric_IpMisp_Cond_Ntaken:
name = "IpMisp_Cond_Ntaken"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Bad_Spec"
metricgroup = frozenset(['Bad', 'BrMispredicts'])
sibling = None
def compute(self, EV):
try:
self.val = IpMisp_Cond_Ntaken(self, EV, 0)
self.thresh = (self.val < 200)
except ZeroDivisionError:
handle_error_metric(self, "IpMisp_Cond_Ntaken zero division")
desc = """
Instructions per retired Mispredicts for conditional non-
taken branches (lower number means higher occurrence rate)."""
class Metric_IpMisp_Cond_Taken:
name = "IpMisp_Cond_Taken"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Bad_Spec"
metricgroup = frozenset(['Bad', 'BrMispredicts'])
sibling = None
def compute(self, EV):
try:
self.val = IpMisp_Cond_Taken(self, EV, 0)
self.thresh = (self.val < 200)
except ZeroDivisionError:
handle_error_metric(self, "IpMisp_Cond_Taken zero division")
desc = """
Instructions per retired Mispredicts for conditional taken
branches (lower number means higher occurrence rate)."""
class Metric_IpMisp_Ret:
name = "IpMisp_Ret"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Bad_Spec"
metricgroup = frozenset(['Bad', 'BrMispredicts'])
sibling = None
def compute(self, EV):
try:
self.val = IpMisp_Ret(self, EV, 0)
self.thresh = (self.val < 500)
except ZeroDivisionError:
handle_error_metric(self, "IpMisp_Ret zero division")
desc = """
Instructions per retired Mispredicts for return branches
(lower number means higher occurrence rate)."""
class Metric_IpMisp_Indirect:
name = "IpMisp_Indirect"
domain = "Inst_Metric"
maxval = 0
errcount = 0
area = "Info.Bad_Spec"
metricgroup = frozenset(['Bad', 'BrMispredicts'])
sibling = None
def compute(self, EV):
try:
self.val = IpMisp_Indirect(self, EV, 0)
self.thresh = (self.val < 1000)
except ZeroDivisionError:
handle_error_metric(self, "IpMisp_Indirect zero division")
desc = """
Instructions per retired Mispredicts for indirect CALL or
JMP branches (lower number means higher occurrence rate)."""
class Metric_Branch_Misprediction_Cost:
name = "Branch_Misprediction_Cost"
domain = "Core_Metric"
maxval = 300
errcount = 0
area = "Info.Bad_Spec"
metricgroup = frozenset(['Bad', 'BrMispredicts'])
sibling = None
def compute(self, EV):
try:
self.val = Branch_Misprediction_Cost(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Branch_Misprediction_Cost zero division")
desc = """
Branch Misprediction Cost: Fraction of TMA slots wasted per
non-speculative branch misprediction (retired JEClear)"""
class Metric_Spec_Clears_Ratio:
name = "Spec_Clears_Ratio"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Bad_Spec"
metricgroup = frozenset(['BrMispredicts'])
sibling = None
def compute(self, EV):
try:
self.val = Spec_Clears_Ratio(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Spec_Clears_Ratio zero division")
desc = """
Speculative to Retired ratio of all clears (covering
Mispredicts and nukes)"""
class Metric_Cond_NT:
name = "Cond_NT"
domain = "Fraction"
maxval = 1.0
errcount = 0
area = "Info.Branches"
metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO'])
sibling = None
def compute(self, EV):
try:
self.val = Cond_NT(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Cond_NT zero division")
desc = """
Fraction of branches that are non-taken conditionals"""
class Metric_Cond_TK:
name = "Cond_TK"
domain = "Fraction"
maxval = 1.0
errcount = 0
area = "Info.Branches"
metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO'])
sibling = None
def compute(self, EV):
try:
self.val = Cond_TK(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Cond_TK zero division")
desc = """
Fraction of branches that are taken conditionals"""
class Metric_CallRet:
name = "CallRet"
domain = "Fraction"
maxval = 1.0
errcount = 0
area = "Info.Branches"
metricgroup = frozenset(['Bad', 'Branches'])
sibling = None
def compute(self, EV):
try:
self.val = CallRet(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CallRet zero division")
desc = """
Fraction of branches that are CALL or RET"""
class Metric_Jump:
name = "Jump"
domain = "Fraction"
maxval = 1.0
errcount = 0
area = "Info.Branches"
metricgroup = frozenset(['Bad', 'Branches'])
sibling = None
def compute(self, EV):
try:
self.val = Jump(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Jump zero division")
desc = """
Fraction of branches that are unconditional (direct or
indirect) jumps"""
class Metric_Other_Branches:
name = "Other_Branches"
domain = "Fraction"
maxval = 1.0
errcount = 0
area = "Info.Branches"
metricgroup = frozenset(['Bad', 'Branches'])
sibling = None
def compute(self, EV):
try:
self.val = Other_Branches(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Other_Branches zero division")
desc = """
Fraction of branches of other types (not individually
covered by other metrics in Info.Branches group)"""
class Metric_Load_Miss_Real_Latency:
name = "Load_Miss_Real_Latency"
domain = "Clocks_Latency"
maxval = 1000
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat'])
sibling = None
def compute(self, EV):
try:
self.val = Load_Miss_Real_Latency(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Load_Miss_Real_Latency zero division")
desc = """
Actual Average Latency for L1 data-cache miss demand load
operations (in core cycles)"""
class Metric_MLP:
name = "MLP"
domain = "Metric"
maxval = 10.0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = MLP(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "MLP zero division")
desc = """
Memory-Level-Parallelism (average number of L1 miss demand
load when there is at least one such miss. Per-Logical
Processor)"""
class Metric_L1MPKI:
name = "L1MPKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['CacheHits', 'Mem'])
sibling = None
def compute(self, EV):
try:
self.val = L1MPKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L1MPKI zero division")
desc = """
L1 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L1MPKI_Load:
name = "L1MPKI_Load"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['CacheHits', 'Mem'])
sibling = None
def compute(self, EV):
try:
self.val = L1MPKI_Load(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L1MPKI_Load zero division")
desc = """
L1 cache true misses per kilo instruction for all demand
loads (including speculative)"""
class Metric_L2MPKI:
name = "L2MPKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'Backend', 'CacheHits'])
sibling = None
def compute(self, EV):
try:
self.val = L2MPKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2MPKI zero division")
desc = """
L2 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L2MPKI_All:
name = "L2MPKI_All"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = L2MPKI_All(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2MPKI_All zero division")
desc = """
L2 cache true misses per kilo instruction for all request
types (including speculative)"""
class Metric_L2MPKI_Load:
name = "L2MPKI_Load"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['CacheHits', 'Mem'])
sibling = None
def compute(self, EV):
try:
self.val = L2MPKI_Load(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2MPKI_Load zero division")
desc = """
L2 cache true misses per kilo instruction for all demand
loads (including speculative)"""
class Metric_L2MPKI_RFO:
name = "L2MPKI_RFO"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['CacheMisses', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = L2MPKI_RFO(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2MPKI_RFO zero division")
desc = """
Offcore requests (L2 cache miss) per kilo instruction for
demand RFOs"""
class Metric_L2HPKI_All:
name = "L2HPKI_All"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['CacheHits', 'Mem'])
sibling = None
def compute(self, EV):
try:
self.val = L2HPKI_All(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2HPKI_All zero division")
desc = """
L2 cache hits per kilo instruction for all request types
(including speculative)"""
class Metric_L2HPKI_Load:
name = "L2HPKI_Load"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['CacheHits', 'Mem'])
sibling = None
def compute(self, EV):
try:
self.val = L2HPKI_Load(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2HPKI_Load zero division")
desc = """
L2 cache hits per kilo instruction for all demand loads
(including speculative)"""
class Metric_L3MPKI:
name = "L3MPKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem'])
sibling = None
def compute(self, EV):
try:
self.val = L3MPKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L3MPKI zero division")
desc = """
L3 cache true misses per kilo instruction for retired demand
loads"""
class Metric_FB_HPKI:
name = "FB_HPKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['CacheHits', 'Mem'])
sibling = None
def compute(self, EV):
try:
self.val = FB_HPKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "FB_HPKI zero division")
desc = """
Fill Buffer (FB) hits per kilo instructions for retired
demand loads (L1D misses that merge into ongoing miss-
handling entries)"""
class Metric_L1D_Cache_Fill_BW:
name = "L1D_Cache_Fill_BW"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L1D_Cache_Fill_BW(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L1D_Cache_Fill_BW zero division")
desc = """
"""
class Metric_L2_Cache_Fill_BW:
name = "L2_Cache_Fill_BW"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L2_Cache_Fill_BW(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2_Cache_Fill_BW zero division")
desc = """
"""
class Metric_L3_Cache_Fill_BW:
name = "L3_Cache_Fill_BW"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L3_Cache_Fill_BW(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L3_Cache_Fill_BW zero division")
desc = """
"""
class Metric_L3_Cache_Access_BW:
name = "L3_Cache_Access_BW"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory"
metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = L3_Cache_Access_BW(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L3_Cache_Access_BW zero division")
desc = """
"""
class Metric_Page_Walks_Utilization:
name = "Page_Walks_Utilization"
domain = "Core_Metric"
maxval = 1.0
errcount = 0
area = "Info.Memory.TLB"
metricgroup = frozenset(['Mem', 'MemoryTLB'])
sibling = None
def compute(self, EV):
try:
self.val = Page_Walks_Utilization(self, EV, 0)
self.thresh = (self.val > 0.5)
except ZeroDivisionError:
handle_error_metric(self, "Page_Walks_Utilization zero division")
desc = """
Utilization of the core's Page Walker(s) serving STLB misses
triggered by instruction/Load/Store accesses"""
class Metric_Code_STLB_MPKI:
name = "Code_STLB_MPKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory.TLB"
metricgroup = frozenset(['Fed', 'MemoryTLB'])
sibling = None
def compute(self, EV):
try:
self.val = Code_STLB_MPKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Code_STLB_MPKI zero division")
desc = """
STLB (2nd level TLB) code speculative misses per kilo
instruction (misses of any page-size that complete the page
walk)"""
class Metric_Load_STLB_MPKI:
name = "Load_STLB_MPKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory.TLB"
metricgroup = frozenset(['Mem', 'MemoryTLB'])
sibling = None
def compute(self, EV):
try:
self.val = Load_STLB_MPKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Load_STLB_MPKI zero division")
desc = """
STLB (2nd level TLB) data load speculative misses per kilo
instruction (misses of any page-size that complete the page
walk)"""
class Metric_Store_STLB_MPKI:
name = "Store_STLB_MPKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory.TLB"
metricgroup = frozenset(['Mem', 'MemoryTLB'])
sibling = None
def compute(self, EV):
try:
self.val = Store_STLB_MPKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Store_STLB_MPKI zero division")
desc = """
STLB (2nd level TLB) data store speculative misses per kilo
instruction (misses of any page-size that complete the page
walk)"""
class Metric_L1D_Cache_Fill_BW_2T:
name = "L1D_Cache_Fill_BW_2T"
domain = "Core_Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Core"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L1D_Cache_Fill_BW_2T(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division")
desc = """
Average per-core data fill bandwidth to the L1 data cache
[GB / sec]"""
class Metric_L2_Cache_Fill_BW_2T:
name = "L2_Cache_Fill_BW_2T"
domain = "Core_Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Core"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L2_Cache_Fill_BW_2T(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division")
desc = """
Average per-core data fill bandwidth to the L2 cache [GB /
sec]"""
class Metric_L3_Cache_Fill_BW_2T:
name = "L3_Cache_Fill_BW_2T"
domain = "Core_Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Core"
metricgroup = frozenset(['Mem', 'MemoryBW'])
sibling = None
def compute(self, EV):
try:
self.val = L3_Cache_Fill_BW_2T(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division")
desc = """
Average per-core data fill bandwidth to the L3 cache [GB /
sec]"""
class Metric_L3_Cache_Access_BW_2T:
name = "L3_Cache_Access_BW_2T"
domain = "Core_Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Core"
metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = L3_Cache_Access_BW_2T(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L3_Cache_Access_BW_2T zero division")
desc = """
Average per-core data access bandwidth to the L3 cache [GB /
sec]"""
class Metric_L2_Evictions_Silent_PKI:
name = "L2_Evictions_Silent_PKI"
domain = "Core_Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Core"
metricgroup = frozenset(['L2Evicts', 'Mem', 'Server'])
sibling = None
def compute(self, EV):
try:
self.val = L2_Evictions_Silent_PKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2_Evictions_Silent_PKI zero division")
desc = """
Rate of silent evictions from the L2 cache per Kilo
instruction where the evicted lines are dropped (no
writeback to L3 or memory)"""
class Metric_L2_Evictions_NonSilent_PKI:
name = "L2_Evictions_NonSilent_PKI"
domain = "Core_Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Core"
metricgroup = frozenset(['L2Evicts', 'Mem', 'Server'])
sibling = None
def compute(self, EV):
try:
self.val = L2_Evictions_NonSilent_PKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "L2_Evictions_NonSilent_PKI zero division")
desc = """
Rate of non silent evictions from the L2 cache per Kilo
instruction"""
class Metric_Load_L2_Miss_Latency:
name = "Load_L2_Miss_Latency"
domain = "Clocks_Latency"
maxval = 1000
errcount = 0
area = "Info.Memory.Latency"
metricgroup = frozenset(['Memory_Lat', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = Load_L2_Miss_Latency(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Load_L2_Miss_Latency zero division")
desc = """
Average Latency for L2 cache miss demand Loads"""
class Metric_Load_L3_Miss_Latency:
name = "Load_L3_Miss_Latency"
domain = "Clocks_Latency"
maxval = 1000
errcount = 0
area = "Info.Memory.Latency"
metricgroup = frozenset(['Memory_Lat', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = Load_L3_Miss_Latency(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Load_L3_Miss_Latency zero division")
desc = """
Average Latency for L3 cache miss demand Loads"""
class Metric_Load_L2_MLP:
name = "Load_L2_MLP"
domain = "Metric"
maxval = 100
errcount = 0
area = "Info.Memory.Latency"
metricgroup = frozenset(['Memory_BW', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = Load_L2_MLP(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Load_L2_MLP zero division")
desc = """
Average Parallel L2 cache miss demand Loads"""
class Metric_Data_L2_MLP:
name = "Data_L2_MLP"
domain = "Metric"
maxval = 100
errcount = 0
area = "Info.Memory.Latency"
metricgroup = frozenset(['Memory_BW', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = Data_L2_MLP(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Data_L2_MLP zero division")
desc = """
Average Parallel L2 cache miss data reads"""
class Metric_Offcore_Read_Any_PKI:
name = "Offcore_Read_Any_PKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Mix"
metricgroup = frozenset(['CacheHits', 'Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = Offcore_Read_Any_PKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Offcore_Read_Any_PKI zero division")
desc = """
Off-core accesses per kilo instruction for reads-to-core
requests (speculative; including in-core HW prefetches)"""
class Metric_Offcore_Read_L3M_PKI:
name = "Offcore_Read_L3M_PKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Mix"
metricgroup = frozenset(['Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = Offcore_Read_L3M_PKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Offcore_Read_L3M_PKI zero division")
desc = """
L3 cache misses per kilo instruction for reads-to-core
requests (speculative; including in-core HW prefetches)"""
class Metric_Offcore_MWrite_Any_PKI:
name = "Offcore_MWrite_Any_PKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Mix"
metricgroup = frozenset(['Offcore'])
sibling = None
def compute(self, EV):
try:
self.val = Offcore_MWrite_Any_PKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Offcore_MWrite_Any_PKI zero division")
desc = """
Off-core accesses per kilo instruction for modified write
requests"""
class Metric_UC_Load_PKI:
name = "UC_Load_PKI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Memory.Mix"
metricgroup = frozenset(['Mem'])
sibling = None
def compute(self, EV):
try:
self.val = UC_Load_PKI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "UC_Load_PKI zero division")
desc = """
Un-cacheable retired load per kilo instruction"""
class Metric_Bus_Lock_PKI:
    # Bus-lock events per kilo retired instruction.
    name = "Bus_Lock_PKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Mix"
    metricgroup = frozenset(['Mem'])
    sibling = None
    def compute(self, EV):
        # Bus_Lock_PKI() is the generated ratio helper defined earlier in
        # this file.
        try:
            self.val = Bus_Lock_PKI(self, EV, 0)
            # Informational metric: unconditionally eligible for display.
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Bus_Lock_PKI zero division")
    # Note: the \" escapes below are the generator's way of embedding
    # double quotes; do not "fix" them.
    desc = """
\"Bus lock\" per kilo instruction"""
class Metric_CPU_Utilization:
    """Average CPU utilization over the measurement interval."""
    name = "CPU_Utilization"
    domain = "Metric"
    area = "Info.System"
    maxval = 1
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC', 'Summary'])
    desc = """
Average CPU Utilization (percentage)"""

    def compute(self, EV):
        # Summary metric: always eligible for display, so thresh is fixed True.
        try:
            self.val = CPU_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPU_Utilization zero division")
class Metric_CPUs_Utilized:
    # Average count of utilized CPUs (info metric).
    name = "CPUs_Utilized"
    domain = "Metric"
    area = "Info.System"
    metricgroup = frozenset({"Summary"})
    maxval = 300
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = CPUs_Utilized(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "CPUs_Utilized zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average number of utilized CPUs"""
class Metric_Core_Frequency:
    # Measured average core frequency for unhalted processors [GHz].
    name = "Core_Frequency"
    domain = "SystemMetric"
    area = "Info.System"
    metricgroup = frozenset({"Summary", "Power"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = Core_Frequency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Frequency zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Measured Average Core Frequency for unhalted processors
[GHz]"""
class Metric_Uncore_Frequency:
    # Measured average uncore (SoC) frequency [GHz].
    name = "Uncore_Frequency"
    domain = "SystemMetric"
    area = "Info.System"
    metricgroup = frozenset({"SoC"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = Uncore_Frequency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Uncore_Frequency zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Measured Average Uncore Frequency for the SoC [GHz]"""
class Metric_GFLOPs:
    # Giga floating-point operations per second, aggregated across FP flavors.
    name = "GFLOPs"
    domain = "Metric"
    area = "Info.System"
    metricgroup = frozenset({"Cor", "Flops", "HPC"})
    maxval = 200
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = GFLOPs(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "GFLOPs zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Giga Floating Point Operations Per Second. Aggregate across
all supported options of: FP precisions, scalar and vector
instructions, vector-width"""
class Metric_Turbo_Utilization:
    # Average frequency utilization relative to nominal frequency.
    name = "Turbo_Utilization"
    domain = "Core_Metric"
    area = "Info.System"
    metricgroup = frozenset({"Power"})
    maxval = 10.0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = Turbo_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Turbo_Utilization zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average Frequency Utilization relative nominal frequency"""
class Metric_SMT_2T_Utilization:
    # Fraction of cycles with both SMT hardware threads active.
    name = "SMT_2T_Utilization"
    domain = "Core_Metric"
    area = "Info.System"
    metricgroup = frozenset({"SMT"})
    maxval = 1.0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = SMT_2T_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "SMT_2T_Utilization zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Fraction of cycles where both hardware Logical Processors
were active"""
class Metric_Kernel_Utilization:
    # Fraction of cycles spent in OS kernel mode; flagged when above 5%.
    name = "Kernel_Utilization"
    domain = "Metric"
    area = "Info.System"
    metricgroup = frozenset({"OS"})
    maxval = 1.0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; threshold fires when kernel time exceeds 5%.
        try:
            value = Kernel_Utilization(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_Utilization zero division")
        else:
            self.val = value
            self.thresh = (self.val > 0.05)
    desc = """
Fraction of cycles spent in the Operating System (OS) Kernel
mode"""
class Metric_Kernel_CPI:
    # Cycles per instruction while in OS kernel mode.
    name = "Kernel_CPI"
    domain = "Metric"
    area = "Info.System"
    metricgroup = frozenset({"OS"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = Kernel_CPI(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_CPI zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Cycles Per Instruction for the Operating System (OS) Kernel
mode"""
class Metric_C0_Wait:
    # Fraction of cycles the CPU waits while unhalted (PAUSE/C0.1/C0.2); flagged above 5%.
    name = "C0_Wait"
    domain = "Metric"
    area = "Info.System"
    metricgroup = frozenset({"C0Wait"})
    maxval = 1.0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; threshold fires when wait fraction exceeds 5%.
        try:
            value = C0_Wait(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "C0_Wait zero division")
        else:
            self.val = value
            self.thresh = (self.val > 0.05)
    desc = """
Fraction of cycles the processor is waiting yet unhalted;
covering legacy PAUSE instruction, as well as C0.1 / C0.2
power-performance optimized states. Sample code of TPAUSE: h
ttps://github.com/torvalds/linux/blob/master/arch/x86/lib/de
lay.c"""
class Metric_DRAM_BW_Use:
    # Average external memory bandwidth use, reads plus writes [GB/sec].
    name = "DRAM_BW_Use"
    domain = "GB/sec"
    area = "Info.System"
    metricgroup = frozenset({"HPC", "MemOffcore", "MemoryBW", "SoC"})
    maxval = 200
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = DRAM_BW_Use(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "DRAM_BW_Use zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average external Memory Bandwidth Use for reads and writes
[GB / sec]"""
class Metric_R2C_Offcore_BW:
    # Average off-core reads-to-core (R2C) bandwidth [GB/sec].
    name = "R2C_Offcore_BW"
    domain = "GB/sec"
    area = "Info.Memory.SoC"
    metricgroup = frozenset({"HPC", "Mem", "MemoryBW", "SoC"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = R2C_Offcore_BW(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "R2C_Offcore_BW zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average Off-core access BW for Reads-to-Core (R2C). R2C
account for demand or prefetch load/RFO/code access that
fill data into the Core caches."""
class Metric_R2C_L3M_BW:
    # Average L3-miss reads-to-core (R2C) bandwidth [GB/sec].
    name = "R2C_L3M_BW"
    domain = "GB/sec"
    area = "Info.Memory.SoC"
    metricgroup = frozenset({"HPC", "Mem", "MemoryBW", "SoC"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = R2C_L3M_BW(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "R2C_L3M_BW zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average L3-cache miss BW for Reads-to-Core (R2C). This
covering going to DRAM or other memory off-chip memory
tears. See R2C_Offcore_BW."""
class Metric_R2C_DRAM_BW:
    # Average DRAM reads-to-core (R2C) bandwidth, local and remote socket [GB/sec].
    name = "R2C_DRAM_BW"
    domain = "GB/sec"
    area = "Info.Memory.SoC"
    metricgroup = frozenset({"HPC", "Mem", "MemoryBW", "SoC"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = R2C_DRAM_BW(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "R2C_DRAM_BW zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average DRAM BW for Reads-to-Core (R2C) covering for memory
attached to local- and remote-socket. See R2C_Offcore_BW."""
class Metric_MEM_Read_Latency:
    # Average external-memory data-read latency [nanoseconds].
    name = "MEM_Read_Latency"
    domain = "NanoSeconds"
    area = "Info.System"
    metricgroup = frozenset({"Mem", "MemoryLat", "SoC"})
    maxval = 1000
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = MEM_Read_Latency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_Read_Latency zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average latency of data read request to external memory (in
nanoseconds). Accounts for demand loads and L1/L2
prefetches. memory-controller only"""
class Metric_MEM_Parallel_Reads:
    # Average number of outstanding parallel data reads to external memory.
    name = "MEM_Parallel_Reads"
    domain = "SystemMetric"
    area = "Info.System"
    metricgroup = frozenset({"Mem", "MemoryBW", "SoC"})
    maxval = 100
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = MEM_Parallel_Reads(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_Parallel_Reads zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average number of parallel data read requests to external
memory. Accounts for demand loads and L1/L2 prefetches"""
class Metric_MEM_PMM_Read_Latency:
    # Average 3D X-Point memory data-read latency [nanoseconds].
    name = "MEM_PMM_Read_Latency"
    domain = "NanoSeconds"
    area = "Info.System"
    metricgroup = frozenset({"MemOffcore", "MemoryLat", "SoC", "Server"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = MEM_PMM_Read_Latency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_PMM_Read_Latency zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average latency of data read request to external 3D X-Point
memory [in nanoseconds]. Accounts for demand loads and L1/L2
data-read prefetches"""
class Metric_MEM_DRAM_Read_Latency:
    # Average external-DRAM data-read latency [nanoseconds].
    name = "MEM_DRAM_Read_Latency"
    domain = "NanoSeconds"
    area = "Info.System"
    metricgroup = frozenset({"MemOffcore", "MemoryLat", "SoC", "Server"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = MEM_DRAM_Read_Latency(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_DRAM_Read_Latency zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average latency of data read request to external DRAM memory
[in nanoseconds]. Accounts for demand loads and L1/L2 data-
read prefetches"""
class Metric_PMM_Read_BW:
    # Average 3DXP memory read bandwidth [GB/sec].
    name = "PMM_Read_BW"
    domain = "GB/sec"
    area = "Info.System"
    metricgroup = frozenset({"MemOffcore", "MemoryBW", "SoC", "Server"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = PMM_Read_BW(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "PMM_Read_BW zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average 3DXP Memory Bandwidth Use for reads [GB / sec]"""
class Metric_PMM_Write_BW:
    # Average 3DXP memory write bandwidth [GB/sec].
    name = "PMM_Write_BW"
    domain = "GB/sec"
    area = "Info.System"
    metricgroup = frozenset({"MemOffcore", "MemoryBW", "SoC", "Server"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = PMM_Write_BW(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "PMM_Write_BW zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average 3DXP Memory Bandwidth Use for Writes [GB / sec]"""
class Metric_IO_Read_BW:
    # Average IO (network/disk) read bandwidth, device-initiated reads of CPU memory [GB/sec].
    name = "IO_Read_BW"
    domain = "GB/sec"
    area = "Info.System"
    metricgroup = frozenset({"IoBW", "MemOffcore", "SoC", "Server"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = IO_Read_BW(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "IO_Read_BW zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average IO (network or disk) Bandwidth Use for Reads [GB /
sec]. Bandwidth of IO reads that are initiated by end device
controllers that are requesting memory from the CPU"""
class Metric_IO_Write_BW:
    # Average IO (network/disk) write bandwidth, device-initiated writes to CPU memory [GB/sec].
    name = "IO_Write_BW"
    domain = "GB/sec"
    area = "Info.System"
    metricgroup = frozenset({"IoBW", "MemOffcore", "SoC", "Server"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = IO_Write_BW(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "IO_Write_BW zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Average IO (network or disk) Bandwidth Use for Writes [GB /
sec]. Bandwidth of IO writes that are initiated by end
device controllers that are writing memory to the CPU"""
class Metric_UPI_Data_Transmit_BW:
    # Cross-socket UPI data transmit bandwidth, data only [MB/sec].
    name = "UPI_Data_Transmit_BW"
    domain = "MB/sec"
    area = "Info.System"
    metricgroup = frozenset({"SoC", "Server"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = UPI_Data_Transmit_BW(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "UPI_Data_Transmit_BW zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Cross-socket Ultra Path Interconnect (UPI) data transmit
bandwidth for data only [MB / sec]"""
class Metric_Time:
    # Run duration in seconds; flagged when shorter than one second.
    name = "Time"
    domain = "Seconds"
    area = "Info.System"
    metricgroup = frozenset({"Summary"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; threshold fires for runs under one second.
        try:
            value = Time(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Time zero division")
        else:
            self.val = value
            self.thresh = (self.val < 1)
    desc = """
Run duration time in seconds"""
class Metric_Socket_CLKS:
    # Socket clock count while any core on that socket is active.
    name = "Socket_CLKS"
    domain = "Count"
    area = "Info.System"
    metricgroup = frozenset({"SoC"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; on a zero denominator report via the shared handler.
        try:
            value = Socket_CLKS(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Socket_CLKS zero division")
        else:
            self.val = value
            self.thresh = True
    desc = """
Socket actual clocks when any core is active on that socket"""
class Metric_IpFarBranch:
    # Instructions per far branch; flagged when below one million (high far-branch rate).
    name = "IpFarBranch"
    domain = "Inst_Metric"
    area = "Info.System"
    metricgroup = frozenset({"Branches", "OS"})
    maxval = 0
    errcount = 0
    sibling = None
    def compute(self, EV):
        # Evaluate the formula; lower values mean far branches occur more often.
        try:
            value = IpFarBranch(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFarBranch zero division")
        else:
            self.val = value
            self.thresh = (self.val < 1000000)
    desc = """
Instructions per Far Branch ( Far Branches apply upon
transition from application to operating system, handling
interrupts, exceptions) [lower number means higher
occurrence rate]"""
# Schedule
class Setup:
def __init__(self, r):
o = dict()
n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n
n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n
n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n
n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n
n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n
n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n
n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n
n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n
n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n
n = LCP() ; r.run(n) ; o["LCP"] = n
n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n
n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n
n = MITE() ; r.run(n) ; o["MITE"] = n
n = Decoder0_Alone() ; r.run(n) ; o["Decoder0_Alone"] = n
n = DSB() ; r.run(n) ; o["DSB"] = n
n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n
n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n
n = Other_Mispredicts() ; r.run(n) ; o["Other_Mispredicts"] = n
n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n
n = Other_Nukes() ; r.run(n) ; o["Other_Nukes"] = n
n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n
n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n
n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n
n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n
n = Load_STLB_Hit() ; r.run(n) ; o["Load_STLB_Hit"] = n
n = Load_STLB_Miss() ; r.run(n) ; o["Load_STLB_Miss"] = n
n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n
n = L1_Hit_Latency() ; r.run(n) ; o["L1_Hit_Latency"] = n
n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n
n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n
n = FB_Full() ; r.run(n) ; o["FB_Full"] = n
n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n
n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n
n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n
n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n
n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n
n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n
n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n
n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n
n = MBA_Stalls() ; r.run(n) ; o["MBA_Stalls"] = n
n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n
n = Local_MEM() ; r.run(n) ; o["Local_MEM"] = n
n = Remote_MEM() ; r.run(n) ; o["Remote_MEM"] = n
n = Remote_Cache() ; r.run(n) ; o["Remote_Cache"] = n
n = PMM_Bound() ; r.run(n) ; o["PMM_Bound"] = n
n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n
n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n
n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n
n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n
n = Streaming_Stores() ; r.run(n) ; o["Streaming_Stores"] = n
n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n
n = Store_STLB_Hit() ; r.run(n) ; o["Store_STLB_Hit"] = n
n = Store_STLB_Miss() ; r.run(n) ; o["Store_STLB_Miss"] = n
n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n
n = Divider() ; r.run(n) ; o["Divider"] = n
n = Serializing_Operation() ; r.run(n) ; o["Serializing_Operation"] = n
n = Slow_Pause() ; r.run(n) ; o["Slow_Pause"] = n
n = C01_WAIT() ; r.run(n) ; o["C01_WAIT"] = n
n = C02_WAIT() ; r.run(n) ; o["C02_WAIT"] = n
n = Memory_Fence() ; r.run(n) ; o["Memory_Fence"] = n
n = AMX_Busy() ; r.run(n) ; o["AMX_Busy"] = n
n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n
n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n
n = Mixing_Vectors() ; r.run(n) ; o["Mixing_Vectors"] = n
n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n
n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n
n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n
n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n
n = Port_0() ; r.run(n) ; o["Port_0"] = n
n = Port_1() ; r.run(n) ; o["Port_1"] = n
n = Port_6() ; r.run(n) ; o["Port_6"] = n
n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n
n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n
n = Retiring() ; r.run(n) ; o["Retiring"] = n
n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n
n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n
n = X87_Use() ; r.run(n) ; o["X87_Use"] = n
n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n
n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n
n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n
n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n
n = FP_Vector_512b() ; r.run(n) ; o["FP_Vector_512b"] = n
n = Int_Operations() ; r.run(n) ; o["Int_Operations"] = n
n = Int_Vector_128b() ; r.run(n) ; o["Int_Vector_128b"] = n
n = Int_Vector_256b() ; r.run(n) ; o["Int_Vector_256b"] = n
n = Memory_Operations() ; r.run(n) ; o["Memory_Operations"] = n
n = Fused_Instructions() ; r.run(n) ; o["Fused_Instructions"] = n
n = Non_Fused_Branches() ; r.run(n) ; o["Non_Fused_Branches"] = n
n = Other_Light_Ops() ; r.run(n) ; o["Other_Light_Ops"] = n
n = Nop_Instructions() ; r.run(n) ; o["Nop_Instructions"] = n
n = Shuffles_256b() ; r.run(n) ; o["Shuffles_256b"] = n
n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n
n = Few_Uops_Instructions() ; r.run(n) ; o["Few_Uops_Instructions"] = n
n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n
n = Assists() ; r.run(n) ; o["Assists"] = n
n = Page_Faults() ; r.run(n) ; o["Page_Faults"] = n
n = FP_Assists() ; r.run(n) ; o["FP_Assists"] = n
n = AVX_Assists() ; r.run(n) ; o["AVX_Assists"] = n
n = CISC() ; r.run(n) ; o["CISC"] = n
# parents
o["Fetch_Latency"].parent = o["Frontend_Bound"]
o["ICache_Misses"].parent = o["Fetch_Latency"]
o["ITLB_Misses"].parent = o["Fetch_Latency"]
o["Branch_Resteers"].parent = o["Fetch_Latency"]
o["Mispredicts_Resteers"].parent = o["Branch_Resteers"]
o["Clears_Resteers"].parent = o["Branch_Resteers"]
o["Unknown_Branches"].parent = o["Branch_Resteers"]
o["MS_Switches"].parent = o["Fetch_Latency"]
o["LCP"].parent = o["Fetch_Latency"]
o["DSB_Switches"].parent = o["Fetch_Latency"]
o["Fetch_Bandwidth"].parent = o["Frontend_Bound"]
o["MITE"].parent = o["Fetch_Bandwidth"]
o["Decoder0_Alone"].parent = o["MITE"]
o["DSB"].parent = o["Fetch_Bandwidth"]
o["Branch_Mispredicts"].parent = o["Bad_Speculation"]
o["Other_Mispredicts"].parent = o["Branch_Mispredicts"]
o["Machine_Clears"].parent = o["Bad_Speculation"]
o["Other_Nukes"].parent = o["Machine_Clears"]
o["Memory_Bound"].parent = o["Backend_Bound"]
o["L1_Bound"].parent = o["Memory_Bound"]
o["DTLB_Load"].parent = o["L1_Bound"]
o["Load_STLB_Hit"].parent = o["DTLB_Load"]
o["Load_STLB_Miss"].parent = o["DTLB_Load"]
o["Store_Fwd_Blk"].parent = o["L1_Bound"]
o["L1_Hit_Latency"].parent = o["L1_Bound"]
o["Lock_Latency"].parent = o["L1_Bound"]
o["Split_Loads"].parent = o["L1_Bound"]
o["FB_Full"].parent = o["L1_Bound"]
o["L2_Bound"].parent = o["Memory_Bound"]
o["L3_Bound"].parent = o["Memory_Bound"]
o["Contested_Accesses"].parent = o["L3_Bound"]
o["Data_Sharing"].parent = o["L3_Bound"]
o["L3_Hit_Latency"].parent = o["L3_Bound"]
o["SQ_Full"].parent = o["L3_Bound"]
o["DRAM_Bound"].parent = o["Memory_Bound"]
o["MEM_Bandwidth"].parent = o["DRAM_Bound"]
o["MBA_Stalls"].parent = o["MEM_Bandwidth"]
o["MEM_Latency"].parent = o["DRAM_Bound"]
o["Local_MEM"].parent = o["MEM_Latency"]
o["Remote_MEM"].parent = o["MEM_Latency"]
o["Remote_Cache"].parent = o["MEM_Latency"]
o["PMM_Bound"].parent = o["Memory_Bound"]
o["Store_Bound"].parent = o["Memory_Bound"]
o["Store_Latency"].parent = o["Store_Bound"]
o["False_Sharing"].parent = o["Store_Bound"]
o["Split_Stores"].parent = o["Store_Bound"]
o["Streaming_Stores"].parent = o["Store_Bound"]
o["DTLB_Store"].parent = o["Store_Bound"]
o["Store_STLB_Hit"].parent = o["DTLB_Store"]
o["Store_STLB_Miss"].parent = o["DTLB_Store"]
o["Core_Bound"].parent = o["Backend_Bound"]
o["Divider"].parent = o["Core_Bound"]
o["Serializing_Operation"].parent = o["Core_Bound"]
o["Slow_Pause"].parent = o["Serializing_Operation"]
o["C01_WAIT"].parent = o["Serializing_Operation"]
o["C02_WAIT"].parent = o["Serializing_Operation"]
o["Memory_Fence"].parent = o["Serializing_Operation"]
o["AMX_Busy"].parent = o["Core_Bound"]
o["Ports_Utilization"].parent = o["Core_Bound"]
o["Ports_Utilized_0"].parent = o["Ports_Utilization"]
o["Mixing_Vectors"].parent = o["Ports_Utilized_0"]
o["Ports_Utilized_1"].parent = o["Ports_Utilization"]
o["Ports_Utilized_2"].parent = o["Ports_Utilization"]
o["Ports_Utilized_3m"].parent = o["Ports_Utilization"]
o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Port_0"].parent = o["ALU_Op_Utilization"]
o["Port_1"].parent = o["ALU_Op_Utilization"]
o["Port_6"].parent = o["ALU_Op_Utilization"]
o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Light_Operations"].parent = o["Retiring"]
o["FP_Arith"].parent = o["Light_Operations"]
o["X87_Use"].parent = o["FP_Arith"]
o["FP_Scalar"].parent = o["FP_Arith"]
o["FP_Vector"].parent = o["FP_Arith"]
o["FP_Vector_128b"].parent = o["FP_Vector"]
o["FP_Vector_256b"].parent = o["FP_Vector"]
o["FP_Vector_512b"].parent = o["FP_Vector"]
o["Int_Operations"].parent = o["Light_Operations"]
o["Int_Vector_128b"].parent = o["Int_Operations"]
o["Int_Vector_256b"].parent = o["Int_Operations"]
o["Memory_Operations"].parent = o["Light_Operations"]
o["Fused_Instructions"].parent = o["Light_Operations"]
o["Non_Fused_Branches"].parent = o["Light_Operations"]
o["Other_Light_Ops"].parent = o["Light_Operations"]
o["Nop_Instructions"].parent = o["Other_Light_Ops"]
o["Shuffles_256b"].parent = o["Other_Light_Ops"]
o["Heavy_Operations"].parent = o["Retiring"]
o["Few_Uops_Instructions"].parent = o["Heavy_Operations"]
o["Microcode_Sequencer"].parent = o["Heavy_Operations"]
o["Assists"].parent = o["Microcode_Sequencer"]
o["Page_Faults"].parent = o["Assists"]
o["FP_Assists"].parent = o["Assists"]
o["AVX_Assists"].parent = o["Assists"]
o["CISC"].parent = o["Microcode_Sequencer"]
# user visible metrics
n = Metric_Mispredictions() ; r.metric(n) ; o["Mispredictions"] = n
n = Metric_Big_Code() ; r.metric(n) ; o["Big_Code"] = n
n = Metric_Instruction_Fetch_BW() ; r.metric(n) ; o["Instruction_Fetch_BW"] = n
n = Metric_Cache_Memory_Bandwidth() ; r.metric(n) ; o["Cache_Memory_Bandwidth"] = n
n = Metric_Cache_Memory_Latency() ; r.metric(n) ; o["Cache_Memory_Latency"] = n
n = Metric_Memory_Data_TLBs() ; r.metric(n) ; o["Memory_Data_TLBs"] = n
n = Metric_Memory_Synchronization() ; r.metric(n) ; o["Memory_Synchronization"] = n
n = Metric_Compute_Bound_Est() ; r.metric(n) ; o["Compute_Bound_Est"] = n
n = Metric_Irregular_Overhead() ; r.metric(n) ; o["Irregular_Overhead"] = n
n = Metric_Other_Bottlenecks() ; r.metric(n) ; o["Other_Bottlenecks"] = n
n = Metric_Branching_Overhead() ; r.metric(n) ; o["Branching_Overhead"] = n
n = Metric_Useful_Work() ; r.metric(n) ; o["Useful_Work"] = n
n = Metric_Core_Bound_Likely() ; r.metric(n) ; o["Core_Bound_Likely"] = n
n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n
n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n
n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n
n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n
n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n
n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n
n = Metric_Slots_Utilization() ; r.metric(n) ; o["Slots_Utilization"] = n
n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n
n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n
n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n
n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n
n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n
n = Metric_EPC() ; r.metric(n) ; o["EPC"] = n
n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n
n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n
n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n
n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n
n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n
n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n
n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n
n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n
n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n
n = Metric_IpArith_Scalar_HP() ; r.metric(n) ; o["IpArith_Scalar_HP"] = n
n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n
n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n
n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n
n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n
n = Metric_IpArith_AVX512() ; r.metric(n) ; o["IpArith_AVX512"] = n
n = Metric_IpPause() ; r.metric(n) ; o["IpPause"] = n
n = Metric_IpSWPF() ; r.metric(n) ; o["IpSWPF"] = n
n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n
n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n
n = Metric_Strings_Cycles() ; r.metric(n) ; o["Strings_Cycles"] = n
n = Metric_IpAssist() ; r.metric(n) ; o["IpAssist"] = n
n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n
n = Metric_Fetch_LSD() ; r.metric(n) ; o["Fetch_LSD"] = n
n = Metric_Fetch_DSB() ; r.metric(n) ; o["Fetch_DSB"] = n
n = Metric_Fetch_MITE() ; r.metric(n) ; o["Fetch_MITE"] = n
n = Metric_Fetch_UpC() ; r.metric(n) ; o["Fetch_UpC"] = n
n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n
n = Metric_Unknown_Branch_Cost() ; r.metric(n) ; o["Unknown_Branch_Cost"] = n
n = Metric_DSB_Switch_Cost() ; r.metric(n) ; o["DSB_Switch_Cost"] = n
n = Metric_DSB_Misses() ; r.metric(n) ; o["DSB_Misses"] = n
n = Metric_DSB_Bandwidth() ; r.metric(n) ; o["DSB_Bandwidth"] = n
n = Metric_ICache_Miss_Latency() ; r.metric(n) ; o["ICache_Miss_Latency"] = n
n = Metric_IC_Misses() ; r.metric(n) ; o["IC_Misses"] = n
n = Metric_IpDSB_Miss_Ret() ; r.metric(n) ; o["IpDSB_Miss_Ret"] = n
n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n
n = Metric_L2MPKI_Code() ; r.metric(n) ; o["L2MPKI_Code"] = n
n = Metric_L2MPKI_Code_All() ; r.metric(n) ; o["L2MPKI_Code_All"] = n
n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n
n = Metric_IpMisp_Cond_Ntaken() ; r.metric(n) ; o["IpMisp_Cond_Ntaken"] = n
n = Metric_IpMisp_Cond_Taken() ; r.metric(n) ; o["IpMisp_Cond_Taken"] = n
n = Metric_IpMisp_Ret() ; r.metric(n) ; o["IpMisp_Ret"] = n
n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n
n = Metric_Branch_Misprediction_Cost() ; r.metric(n) ; o["Branch_Misprediction_Cost"] = n
n = Metric_Spec_Clears_Ratio() ; r.metric(n) ; o["Spec_Clears_Ratio"] = n
n = Metric_Cond_NT() ; r.metric(n) ; o["Cond_NT"] = n
n = Metric_Cond_TK() ; r.metric(n) ; o["Cond_TK"] = n
n = Metric_CallRet() ; r.metric(n) ; o["CallRet"] = n
n = Metric_Jump() ; r.metric(n) ; o["Jump"] = n
n = Metric_Other_Branches() ; r.metric(n) ; o["Other_Branches"] = n
n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n
n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n
n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n
n = Metric_L1MPKI_Load() ; r.metric(n) ; o["L1MPKI_Load"] = n
n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n
n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n
n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n
n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n
n = Metric_L2HPKI_All() ; r.metric(n) ; o["L2HPKI_All"] = n
n = Metric_L2HPKI_Load() ; r.metric(n) ; o["L2HPKI_Load"] = n
n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n
n = Metric_FB_HPKI() ; r.metric(n) ; o["FB_HPKI"] = n
n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n
n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n
n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n
n = Metric_L3_Cache_Access_BW() ; r.metric(n) ; o["L3_Cache_Access_BW"] = n
n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n
n = Metric_Code_STLB_MPKI() ; r.metric(n) ; o["Code_STLB_MPKI"] = n
n = Metric_Load_STLB_MPKI() ; r.metric(n) ; o["Load_STLB_MPKI"] = n
n = Metric_Store_STLB_MPKI() ; r.metric(n) ; o["Store_STLB_MPKI"] = n
n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n
n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n
n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n
n = Metric_L3_Cache_Access_BW_2T() ; r.metric(n) ; o["L3_Cache_Access_BW_2T"] = n
n = Metric_L2_Evictions_Silent_PKI() ; r.metric(n) ; o["L2_Evictions_Silent_PKI"] = n
n = Metric_L2_Evictions_NonSilent_PKI() ; r.metric(n) ; o["L2_Evictions_NonSilent_PKI"] = n
n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n
n = Metric_Load_L3_Miss_Latency() ; r.metric(n) ; o["Load_L3_Miss_Latency"] = n
n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n
n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n
n = Metric_Offcore_Read_Any_PKI() ; r.metric(n) ; o["Offcore_Read_Any_PKI"] = n
n = Metric_Offcore_Read_L3M_PKI() ; r.metric(n) ; o["Offcore_Read_L3M_PKI"] = n
n = Metric_Offcore_MWrite_Any_PKI() ; r.metric(n) ; o["Offcore_MWrite_Any_PKI"] = n
n = Metric_UC_Load_PKI() ; r.metric(n) ; o["UC_Load_PKI"] = n
n = Metric_Bus_Lock_PKI() ; r.metric(n) ; o["Bus_Lock_PKI"] = n
n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n
n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n
n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n
n = Metric_Uncore_Frequency() ; r.metric(n) ; o["Uncore_Frequency"] = n
n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n
n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n
n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n
n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n
n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n
n = Metric_C0_Wait() ; r.metric(n) ; o["C0_Wait"] = n
n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n
n = Metric_R2C_Offcore_BW() ; r.metric(n) ; o["R2C_Offcore_BW"] = n
n = Metric_R2C_L3M_BW() ; r.metric(n) ; o["R2C_L3M_BW"] = n
n = Metric_R2C_DRAM_BW() ; r.metric(n) ; o["R2C_DRAM_BW"] = n
n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n
n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n
n = Metric_MEM_PMM_Read_Latency() ; r.metric(n) ; o["MEM_PMM_Read_Latency"] = n
n = Metric_MEM_DRAM_Read_Latency() ; r.metric(n) ; o["MEM_DRAM_Read_Latency"] = n
n = Metric_PMM_Read_BW() ; r.metric(n) ; o["PMM_Read_BW"] = n
n = Metric_PMM_Write_BW() ; r.metric(n) ; o["PMM_Write_BW"] = n
n = Metric_IO_Read_BW() ; r.metric(n) ; o["IO_Read_BW"] = n
n = Metric_IO_Write_BW() ; r.metric(n) ; o["IO_Write_BW"] = n
n = Metric_UPI_Data_Transmit_BW() ; r.metric(n) ; o["UPI_Data_Transmit_BW"] = n
n = Metric_Time() ; r.metric(n) ; o["Time"] = n
n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n
n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n
# references between groups
o["Branch_Resteers"].Unknown_Branches = o["Unknown_Branches"]
o["Mispredicts_Resteers"].Retiring = o["Retiring"]
o["Mispredicts_Resteers"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Mispredicts_Resteers"].Bad_Speculation = o["Bad_Speculation"]
o["Mispredicts_Resteers"].Frontend_Bound = o["Frontend_Bound"]
o["Mispredicts_Resteers"].Backend_Bound = o["Backend_Bound"]
o["Clears_Resteers"].Retiring = o["Retiring"]
o["Clears_Resteers"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Clears_Resteers"].Bad_Speculation = o["Bad_Speculation"]
o["Clears_Resteers"].Frontend_Bound = o["Frontend_Bound"]
o["Clears_Resteers"].Backend_Bound = o["Backend_Bound"]
o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"]
o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"]
o["Bad_Speculation"].Retiring = o["Retiring"]
o["Bad_Speculation"].Frontend_Bound = o["Frontend_Bound"]
o["Bad_Speculation"].Backend_Bound = o["Backend_Bound"]
o["Other_Mispredicts"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Machine_Clears"].Retiring = o["Retiring"]
o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"]
o["Machine_Clears"].Frontend_Bound = o["Frontend_Bound"]
o["Machine_Clears"].Backend_Bound = o["Backend_Bound"]
o["Other_Nukes"].Machine_Clears = o["Machine_Clears"]
o["Other_Nukes"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Other_Nukes"].Retiring = o["Retiring"]
o["Other_Nukes"].Backend_Bound = o["Backend_Bound"]
o["Other_Nukes"].Bad_Speculation = o["Bad_Speculation"]
o["Other_Nukes"].Frontend_Bound = o["Frontend_Bound"]
o["Load_STLB_Hit"].Load_STLB_Miss = o["Load_STLB_Miss"]
o["Load_STLB_Hit"].DTLB_Load = o["DTLB_Load"]
o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Store_STLB_Hit"].DTLB_Store = o["DTLB_Store"]
o["Store_STLB_Hit"].Store_STLB_Miss = o["Store_STLB_Miss"]
o["Core_Bound"].Memory_Bound = o["Memory_Bound"]
o["Core_Bound"].Backend_Bound = o["Backend_Bound"]
o["Serializing_Operation"].C02_WAIT = o["C02_WAIT"]
o["Ports_Utilization"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Ports_Utilization"].Retiring = o["Retiring"]
o["Retiring"].Heavy_Operations = o["Heavy_Operations"]
o["Light_Operations"].Retiring = o["Retiring"]
o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"]
o["FP_Arith"].Retiring = o["Retiring"]
o["FP_Arith"].FP_Scalar = o["FP_Scalar"]
o["FP_Arith"].X87_Use = o["X87_Use"]
o["FP_Arith"].FP_Vector = o["FP_Vector"]
o["X87_Use"].Retiring = o["Retiring"]
o["FP_Scalar"].Retiring = o["Retiring"]
o["FP_Vector"].Retiring = o["Retiring"]
o["FP_Vector_128b"].Retiring = o["Retiring"]
o["FP_Vector_256b"].Retiring = o["Retiring"]
o["FP_Vector_512b"].Retiring = o["Retiring"]
o["Int_Operations"].Retiring = o["Retiring"]
o["Int_Operations"].Int_Vector_256b = o["Int_Vector_256b"]
o["Int_Operations"].Int_Vector_128b = o["Int_Vector_128b"]
o["Int_Vector_128b"].Retiring = o["Retiring"]
o["Int_Vector_256b"].Retiring = o["Retiring"]
o["Memory_Operations"].Retiring = o["Retiring"]
o["Memory_Operations"].Light_Operations = o["Light_Operations"]
o["Memory_Operations"].Heavy_Operations = o["Heavy_Operations"]
o["Fused_Instructions"].Retiring = o["Retiring"]
o["Fused_Instructions"].Light_Operations = o["Light_Operations"]
o["Fused_Instructions"].Heavy_Operations = o["Heavy_Operations"]
o["Non_Fused_Branches"].Retiring = o["Retiring"]
o["Non_Fused_Branches"].Light_Operations = o["Light_Operations"]
o["Non_Fused_Branches"].Heavy_Operations = o["Heavy_Operations"]
o["Other_Light_Ops"].Light_Operations = o["Light_Operations"]
o["Other_Light_Ops"].Retiring = o["Retiring"]
o["Other_Light_Ops"].Heavy_Operations = o["Heavy_Operations"]
o["Other_Light_Ops"].Int_Operations = o["Int_Operations"]
o["Other_Light_Ops"].Non_Fused_Branches = o["Non_Fused_Branches"]
o["Other_Light_Ops"].FP_Arith = o["FP_Arith"]
o["Other_Light_Ops"].Fused_Instructions = o["Fused_Instructions"]
o["Other_Light_Ops"].Int_Vector_128b = o["Int_Vector_128b"]
o["Other_Light_Ops"].FP_Vector = o["FP_Vector"]
o["Other_Light_Ops"].FP_Scalar = o["FP_Scalar"]
o["Other_Light_Ops"].X87_Use = o["X87_Use"]
o["Other_Light_Ops"].Int_Vector_256b = o["Int_Vector_256b"]
o["Other_Light_Ops"].Memory_Operations = o["Memory_Operations"]
o["Nop_Instructions"].Retiring = o["Retiring"]
o["Nop_Instructions"].Light_Operations = o["Light_Operations"]
o["Nop_Instructions"].Heavy_Operations = o["Heavy_Operations"]
o["Shuffles_256b"].Retiring = o["Retiring"]
o["Shuffles_256b"].Light_Operations = o["Light_Operations"]
o["Shuffles_256b"].Heavy_Operations = o["Heavy_Operations"]
o["Few_Uops_Instructions"].Heavy_Operations = o["Heavy_Operations"]
o["Few_Uops_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["CISC"].Assists = o["Assists"]
o["Mispredictions"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Mispredictions"].LCP = o["LCP"]
o["Mispredictions"].Retiring = o["Retiring"]
o["Mispredictions"].Other_Mispredicts = o["Other_Mispredicts"]
o["Mispredictions"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Mispredictions"].Frontend_Bound = o["Frontend_Bound"]
o["Mispredictions"].DSB_Switches = o["DSB_Switches"]
o["Mispredictions"].Backend_Bound = o["Backend_Bound"]
o["Mispredictions"].Branch_Resteers = o["Branch_Resteers"]
o["Mispredictions"].ICache_Misses = o["ICache_Misses"]
o["Mispredictions"].MS_Switches = o["MS_Switches"]
o["Mispredictions"].Bad_Speculation = o["Bad_Speculation"]
o["Mispredictions"].ITLB_Misses = o["ITLB_Misses"]
o["Mispredictions"].Unknown_Branches = o["Unknown_Branches"]
o["Mispredictions"].Fetch_Latency = o["Fetch_Latency"]
o["Mispredictions"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Big_Code"].LCP = o["LCP"]
o["Big_Code"].ICache_Misses = o["ICache_Misses"]
o["Big_Code"].DSB_Switches = o["DSB_Switches"]
o["Big_Code"].Branch_Resteers = o["Branch_Resteers"]
o["Big_Code"].MS_Switches = o["MS_Switches"]
o["Big_Code"].ITLB_Misses = o["ITLB_Misses"]
o["Big_Code"].Unknown_Branches = o["Unknown_Branches"]
o["Big_Code"].Fetch_Latency = o["Fetch_Latency"]
o["Instruction_Fetch_BW"].Retiring = o["Retiring"]
o["Instruction_Fetch_BW"].Other_Mispredicts = o["Other_Mispredicts"]
o["Instruction_Fetch_BW"].DSB_Switches = o["DSB_Switches"]
o["Instruction_Fetch_BW"].Backend_Bound = o["Backend_Bound"]
o["Instruction_Fetch_BW"].Branch_Resteers = o["Branch_Resteers"]
o["Instruction_Fetch_BW"].Fetch_Latency = o["Fetch_Latency"]
o["Instruction_Fetch_BW"].ICache_Misses = o["ICache_Misses"]
o["Instruction_Fetch_BW"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Instruction_Fetch_BW"].Frontend_Bound = o["Frontend_Bound"]
o["Instruction_Fetch_BW"].Bad_Speculation = o["Bad_Speculation"]
o["Instruction_Fetch_BW"].ITLB_Misses = o["ITLB_Misses"]
o["Instruction_Fetch_BW"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Instruction_Fetch_BW"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Instruction_Fetch_BW"].LCP = o["LCP"]
o["Instruction_Fetch_BW"].Clears_Resteers = o["Clears_Resteers"]
o["Instruction_Fetch_BW"].MS_Switches = o["MS_Switches"]
o["Instruction_Fetch_BW"].Unknown_Branches = o["Unknown_Branches"]
o["Cache_Memory_Bandwidth"].L1_Bound = o["L1_Bound"]
o["Cache_Memory_Bandwidth"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Cache_Memory_Bandwidth"].SQ_Full = o["SQ_Full"]
o["Cache_Memory_Bandwidth"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Cache_Memory_Bandwidth"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Cache_Memory_Bandwidth"].PMM_Bound = o["PMM_Bound"]
o["Cache_Memory_Bandwidth"].Data_Sharing = o["Data_Sharing"]
o["Cache_Memory_Bandwidth"].L2_Bound = o["L2_Bound"]
o["Cache_Memory_Bandwidth"].Memory_Bound = o["Memory_Bound"]
o["Cache_Memory_Bandwidth"].Lock_Latency = o["Lock_Latency"]
o["Cache_Memory_Bandwidth"].MEM_Latency = o["MEM_Latency"]
o["Cache_Memory_Bandwidth"].Store_Bound = o["Store_Bound"]
o["Cache_Memory_Bandwidth"].Split_Loads = o["Split_Loads"]
o["Cache_Memory_Bandwidth"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Cache_Memory_Bandwidth"].DTLB_Load = o["DTLB_Load"]
o["Cache_Memory_Bandwidth"].L3_Bound = o["L3_Bound"]
o["Cache_Memory_Bandwidth"].FB_Full = o["FB_Full"]
o["Cache_Memory_Bandwidth"].Contested_Accesses = o["Contested_Accesses"]
o["Cache_Memory_Bandwidth"].DRAM_Bound = o["DRAM_Bound"]
o["Cache_Memory_Latency"].L1_Bound = o["L1_Bound"]
o["Cache_Memory_Latency"].PMM_Bound = o["PMM_Bound"]
o["Cache_Memory_Latency"].Data_Sharing = o["Data_Sharing"]
o["Cache_Memory_Latency"].L2_Bound = o["L2_Bound"]
o["Cache_Memory_Latency"].Contested_Accesses = o["Contested_Accesses"]
o["Cache_Memory_Latency"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Cache_Memory_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Cache_Memory_Latency"].Store_Latency = o["Store_Latency"]
o["Cache_Memory_Latency"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Cache_Memory_Latency"].DTLB_Load = o["DTLB_Load"]
o["Cache_Memory_Latency"].False_Sharing = o["False_Sharing"]
o["Cache_Memory_Latency"].Streaming_Stores = o["Streaming_Stores"]
o["Cache_Memory_Latency"].Memory_Bound = o["Memory_Bound"]
o["Cache_Memory_Latency"].SQ_Full = o["SQ_Full"]
o["Cache_Memory_Latency"].Store_Bound = o["Store_Bound"]
o["Cache_Memory_Latency"].Split_Loads = o["Split_Loads"]
o["Cache_Memory_Latency"].L3_Bound = o["L3_Bound"]
o["Cache_Memory_Latency"].FB_Full = o["FB_Full"]
o["Cache_Memory_Latency"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Cache_Memory_Latency"].DTLB_Store = o["DTLB_Store"]
o["Cache_Memory_Latency"].Split_Stores = o["Split_Stores"]
o["Cache_Memory_Latency"].Lock_Latency = o["Lock_Latency"]
o["Cache_Memory_Latency"].MEM_Latency = o["MEM_Latency"]
o["Cache_Memory_Latency"].DRAM_Bound = o["DRAM_Bound"]
o["Memory_Data_TLBs"].L1_Bound = o["L1_Bound"]
o["Memory_Data_TLBs"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Memory_Data_TLBs"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Memory_Data_TLBs"].DTLB_Load = o["DTLB_Load"]
o["Memory_Data_TLBs"].Store_Latency = o["Store_Latency"]
o["Memory_Data_TLBs"].Split_Stores = o["Split_Stores"]
o["Memory_Data_TLBs"].PMM_Bound = o["PMM_Bound"]
o["Memory_Data_TLBs"].DTLB_Store = o["DTLB_Store"]
o["Memory_Data_TLBs"].L2_Bound = o["L2_Bound"]
o["Memory_Data_TLBs"].Memory_Bound = o["Memory_Bound"]
o["Memory_Data_TLBs"].Lock_Latency = o["Lock_Latency"]
o["Memory_Data_TLBs"].Store_Bound = o["Store_Bound"]
o["Memory_Data_TLBs"].False_Sharing = o["False_Sharing"]
o["Memory_Data_TLBs"].Split_Loads = o["Split_Loads"]
o["Memory_Data_TLBs"].L3_Bound = o["L3_Bound"]
o["Memory_Data_TLBs"].FB_Full = o["FB_Full"]
o["Memory_Data_TLBs"].Streaming_Stores = o["Streaming_Stores"]
o["Memory_Data_TLBs"].DRAM_Bound = o["DRAM_Bound"]
o["Memory_Synchronization"].L1_Bound = o["L1_Bound"]
o["Memory_Synchronization"].Frontend_Bound = o["Frontend_Bound"]
o["Memory_Synchronization"].False_Sharing = o["False_Sharing"]
o["Memory_Synchronization"].Retiring = o["Retiring"]
o["Memory_Synchronization"].PMM_Bound = o["PMM_Bound"]
o["Memory_Synchronization"].Bad_Speculation = o["Bad_Speculation"]
o["Memory_Synchronization"].Machine_Clears = o["Machine_Clears"]
o["Memory_Synchronization"].Data_Sharing = o["Data_Sharing"]
o["Memory_Synchronization"].Memory_Bound = o["Memory_Bound"]
o["Memory_Synchronization"].SQ_Full = o["SQ_Full"]
o["Memory_Synchronization"].Store_Bound = o["Store_Bound"]
o["Memory_Synchronization"].L3_Bound = o["L3_Bound"]
o["Memory_Synchronization"].L2_Bound = o["L2_Bound"]
o["Memory_Synchronization"].Streaming_Stores = o["Streaming_Stores"]
o["Memory_Synchronization"].Contested_Accesses = o["Contested_Accesses"]
o["Memory_Synchronization"].DTLB_Store = o["DTLB_Store"]
o["Memory_Synchronization"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Memory_Synchronization"].Store_Latency = o["Store_Latency"]
o["Memory_Synchronization"].Split_Stores = o["Split_Stores"]
o["Memory_Synchronization"].Backend_Bound = o["Backend_Bound"]
o["Memory_Synchronization"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Memory_Synchronization"].Other_Nukes = o["Other_Nukes"]
o["Memory_Synchronization"].DRAM_Bound = o["DRAM_Bound"]
o["Compute_Bound_Est"].Serializing_Operation = o["Serializing_Operation"]
o["Compute_Bound_Est"].Ports_Utilization = o["Ports_Utilization"]
o["Compute_Bound_Est"].C02_WAIT = o["C02_WAIT"]
o["Compute_Bound_Est"].Retiring = o["Retiring"]
o["Compute_Bound_Est"].AMX_Busy = o["AMX_Busy"]
o["Compute_Bound_Est"].Ports_Utilized_2 = o["Ports_Utilized_2"]
o["Compute_Bound_Est"].Memory_Bound = o["Memory_Bound"]
o["Compute_Bound_Est"].Ports_Utilized_1 = o["Ports_Utilized_1"]
o["Compute_Bound_Est"].Core_Bound = o["Core_Bound"]
o["Compute_Bound_Est"].Backend_Bound = o["Backend_Bound"]
o["Compute_Bound_Est"].Ports_Utilized_3m = o["Ports_Utilized_3m"]
o["Compute_Bound_Est"].Divider = o["Divider"]
o["Compute_Bound_Est"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Irregular_Overhead"].Heavy_Operations = o["Heavy_Operations"]
o["Irregular_Overhead"].Ports_Utilization = o["Ports_Utilization"]
o["Irregular_Overhead"].C02_WAIT = o["C02_WAIT"]
o["Irregular_Overhead"].Retiring = o["Retiring"]
o["Irregular_Overhead"].ICache_Misses = o["ICache_Misses"]
o["Irregular_Overhead"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Irregular_Overhead"].Frontend_Bound = o["Frontend_Bound"]
o["Irregular_Overhead"].Serializing_Operation = o["Serializing_Operation"]
o["Irregular_Overhead"].Core_Bound = o["Core_Bound"]
o["Irregular_Overhead"].Bad_Speculation = o["Bad_Speculation"]
o["Irregular_Overhead"].ITLB_Misses = o["ITLB_Misses"]
o["Irregular_Overhead"].Divider = o["Divider"]
o["Irregular_Overhead"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Irregular_Overhead"].Memory_Bound = o["Memory_Bound"]
o["Irregular_Overhead"].Machine_Clears = o["Machine_Clears"]
o["Irregular_Overhead"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Irregular_Overhead"].LCP = o["LCP"]
o["Irregular_Overhead"].Other_Mispredicts = o["Other_Mispredicts"]
o["Irregular_Overhead"].AMX_Busy = o["AMX_Busy"]
o["Irregular_Overhead"].Few_Uops_Instructions = o["Few_Uops_Instructions"]
o["Irregular_Overhead"].DSB_Switches = o["DSB_Switches"]
o["Irregular_Overhead"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Irregular_Overhead"].Assists = o["Assists"]
o["Irregular_Overhead"].Backend_Bound = o["Backend_Bound"]
o["Irregular_Overhead"].Branch_Resteers = o["Branch_Resteers"]
o["Irregular_Overhead"].Clears_Resteers = o["Clears_Resteers"]
o["Irregular_Overhead"].MS_Switches = o["MS_Switches"]
o["Irregular_Overhead"].Other_Nukes = o["Other_Nukes"]
o["Irregular_Overhead"].Unknown_Branches = o["Unknown_Branches"]
o["Irregular_Overhead"].Fetch_Latency = o["Fetch_Latency"]
o["Other_Bottlenecks"].L1_Bound = o["L1_Bound"]
o["Other_Bottlenecks"].C02_WAIT = o["C02_WAIT"]
o["Other_Bottlenecks"].Retiring = o["Retiring"]
o["Other_Bottlenecks"].PMM_Bound = o["PMM_Bound"]
o["Other_Bottlenecks"].Data_Sharing = o["Data_Sharing"]
o["Other_Bottlenecks"].L2_Bound = o["L2_Bound"]
o["Other_Bottlenecks"].Core_Bound = o["Core_Bound"]
o["Other_Bottlenecks"].Ports_Utilization = o["Ports_Utilization"]
o["Other_Bottlenecks"].Contested_Accesses = o["Contested_Accesses"]
o["Other_Bottlenecks"].Divider = o["Divider"]
o["Other_Bottlenecks"].L3_Bound = o["L3_Bound"]
o["Other_Bottlenecks"].Ports_Utilized_3m = o["Ports_Utilized_3m"]
o["Other_Bottlenecks"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Other_Bottlenecks"].FB_Full = o["FB_Full"]
o["Other_Bottlenecks"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Other_Bottlenecks"].Store_Latency = o["Store_Latency"]
o["Other_Bottlenecks"].Other_Mispredicts = o["Other_Mispredicts"]
o["Other_Bottlenecks"].DSB_Switches = o["DSB_Switches"]
o["Other_Bottlenecks"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Other_Bottlenecks"].Ports_Utilized_1 = o["Ports_Utilized_1"]
o["Other_Bottlenecks"].Ports_Utilized_2 = o["Ports_Utilized_2"]
o["Other_Bottlenecks"].Assists = o["Assists"]
o["Other_Bottlenecks"].Backend_Bound = o["Backend_Bound"]
o["Other_Bottlenecks"].Branch_Resteers = o["Branch_Resteers"]
o["Other_Bottlenecks"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Other_Bottlenecks"].Heavy_Operations = o["Heavy_Operations"]
o["Other_Bottlenecks"].Fetch_Latency = o["Fetch_Latency"]
o["Other_Bottlenecks"].DTLB_Load = o["DTLB_Load"]
o["Other_Bottlenecks"].False_Sharing = o["False_Sharing"]
o["Other_Bottlenecks"].ICache_Misses = o["ICache_Misses"]
o["Other_Bottlenecks"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Other_Bottlenecks"].Frontend_Bound = o["Frontend_Bound"]
o["Other_Bottlenecks"].Machine_Clears = o["Machine_Clears"]
o["Other_Bottlenecks"].Streaming_Stores = o["Streaming_Stores"]
o["Other_Bottlenecks"].Memory_Bound = o["Memory_Bound"]
o["Other_Bottlenecks"].SQ_Full = o["SQ_Full"]
o["Other_Bottlenecks"].Store_Bound = o["Store_Bound"]
o["Other_Bottlenecks"].Split_Loads = o["Split_Loads"]
o["Other_Bottlenecks"].Bad_Speculation = o["Bad_Speculation"]
o["Other_Bottlenecks"].ITLB_Misses = o["ITLB_Misses"]
o["Other_Bottlenecks"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Other_Bottlenecks"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Other_Bottlenecks"].Serializing_Operation = o["Serializing_Operation"]
o["Other_Bottlenecks"].DTLB_Store = o["DTLB_Store"]
o["Other_Bottlenecks"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Other_Bottlenecks"].LCP = o["LCP"]
o["Other_Bottlenecks"].Split_Stores = o["Split_Stores"]
o["Other_Bottlenecks"].AMX_Busy = o["AMX_Busy"]
o["Other_Bottlenecks"].Few_Uops_Instructions = o["Few_Uops_Instructions"]
o["Other_Bottlenecks"].Lock_Latency = o["Lock_Latency"]
o["Other_Bottlenecks"].MEM_Latency = o["MEM_Latency"]
o["Other_Bottlenecks"].Clears_Resteers = o["Clears_Resteers"]
o["Other_Bottlenecks"].MS_Switches = o["MS_Switches"]
o["Other_Bottlenecks"].Other_Nukes = o["Other_Nukes"]
o["Other_Bottlenecks"].Unknown_Branches = o["Unknown_Branches"]
o["Other_Bottlenecks"].DRAM_Bound = o["DRAM_Bound"]
o["Useful_Work"].Retiring = o["Retiring"]
o["Useful_Work"].Heavy_Operations = o["Heavy_Operations"]
o["Useful_Work"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Useful_Work"].Few_Uops_Instructions = o["Few_Uops_Instructions"]
o["Useful_Work"].Assists = o["Assists"]
o["Core_Bound_Likely"].Memory_Bound = o["Memory_Bound"]
o["Core_Bound_Likely"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Core_Bound_Likely"].Core_Bound = o["Core_Bound"]
o["Core_Bound_Likely"].Ports_Utilization = o["Ports_Utilization"]
o["Core_Bound_Likely"].Retiring = o["Retiring"]
o["Core_Bound_Likely"].Backend_Bound = o["Backend_Bound"]
o["UopPI"].Retiring = o["Retiring"]
o["UpTB"].Retiring = o["Retiring"]
o["Retire"].Retiring = o["Retiring"]
o["DSB_Misses"].MITE = o["MITE"]
o["DSB_Misses"].LCP = o["LCP"]
o["DSB_Misses"].Fetch_Bandwidth = o["Fetch_Bandwidth"]
o["DSB_Misses"].Frontend_Bound = o["Frontend_Bound"]
o["DSB_Misses"].DSB_Switches = o["DSB_Switches"]
o["DSB_Misses"].Branch_Resteers = o["Branch_Resteers"]
o["DSB_Misses"].ICache_Misses = o["ICache_Misses"]
o["DSB_Misses"].MS_Switches = o["MS_Switches"]
o["DSB_Misses"].ITLB_Misses = o["ITLB_Misses"]
o["DSB_Misses"].DSB = o["DSB"]
o["DSB_Misses"].Unknown_Branches = o["Unknown_Branches"]
o["DSB_Misses"].Fetch_Latency = o["Fetch_Latency"]
o["DSB_Bandwidth"].Fetch_Bandwidth = o["Fetch_Bandwidth"]
o["DSB_Bandwidth"].Frontend_Bound = o["Frontend_Bound"]
o["DSB_Bandwidth"].DSB = o["DSB"]
o["DSB_Bandwidth"].MITE = o["MITE"]
o["DSB_Bandwidth"].Fetch_Latency = o["Fetch_Latency"]
o["IC_Misses"].Fetch_Latency = o["Fetch_Latency"]
o["IC_Misses"].LCP = o["LCP"]
o["IC_Misses"].MS_Switches = o["MS_Switches"]
o["IC_Misses"].ICache_Misses = o["ICache_Misses"]
o["IC_Misses"].ITLB_Misses = o["ITLB_Misses"]
o["IC_Misses"].Unknown_Branches = o["Unknown_Branches"]
o["IC_Misses"].DSB_Switches = o["DSB_Switches"]
o["IC_Misses"].Branch_Resteers = o["Branch_Resteers"]
o["Branch_Misprediction_Cost"].Retiring = o["Retiring"]
o["Branch_Misprediction_Cost"].ICache_Misses = o["ICache_Misses"]
o["Branch_Misprediction_Cost"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Branch_Misprediction_Cost"].Frontend_Bound = o["Frontend_Bound"]
o["Branch_Misprediction_Cost"].Bad_Speculation = o["Bad_Speculation"]
o["Branch_Misprediction_Cost"].ITLB_Misses = o["ITLB_Misses"]
o["Branch_Misprediction_Cost"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Branch_Misprediction_Cost"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Branch_Misprediction_Cost"].LCP = o["LCP"]
o["Branch_Misprediction_Cost"].Other_Mispredicts = o["Other_Mispredicts"]
o["Branch_Misprediction_Cost"].DSB_Switches = o["DSB_Switches"]
o["Branch_Misprediction_Cost"].Backend_Bound = o["Backend_Bound"]
o["Branch_Misprediction_Cost"].Branch_Resteers = o["Branch_Resteers"]
o["Branch_Misprediction_Cost"].MS_Switches = o["MS_Switches"]
o["Branch_Misprediction_Cost"].Unknown_Branches = o["Unknown_Branches"]
o["Branch_Misprediction_Cost"].Fetch_Latency = o["Fetch_Latency"]
# siblings cross-tree
o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],)
o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],)
o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Serializing_Operation"], o["Mixing_Vectors"], o["Microcode_Sequencer"],)
o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],)
o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],)
o["Decoder0_Alone"].sibling = (o["Few_Uops_Instructions"],)
o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],)
o["Machine_Clears"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"], o["Microcode_Sequencer"],)
o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],)
o["DTLB_Load"].sibling = (o["DTLB_Store"],)
o["Lock_Latency"].sibling = (o["Store_Latency"],)
o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"], o["Streaming_Stores"],)
o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["Remote_Cache"], o["False_Sharing"],)
o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Remote_Cache"], o["False_Sharing"],)
o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],)
o["L3_Hit_Latency"].overlap = True
o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],)
o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],)
o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],)
o["Remote_Cache"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"],)
o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],)
o["Store_Latency"].overlap = True
o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"], o["Remote_Cache"],)
o["Streaming_Stores"].sibling = (o["FB_Full"],)
o["DTLB_Store"].sibling = (o["DTLB_Load"],)
o["Serializing_Operation"].sibling = (o["MS_Switches"],)
o["Mixing_Vectors"].sibling = (o["MS_Switches"],)
o["Ports_Utilized_1"].sibling = (o["L1_Bound"],)
o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"], o["FP_Vector_512b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_512b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["FP_Vector_512b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["Int_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"], o["Int_Vector_256b"],)
o["Int_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["FP_Vector_512b"], o["Int_Vector_128b"],)
o["Few_Uops_Instructions"].sibling = (o["Decoder0_Alone"],)
o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],)
o["Mispredictions"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],)
o["Cache_Memory_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
o["Cache_Memory_Latency"].sibling = (o["L3_Hit_Latency"], o["MEM_Latency"],)
o["Memory_Data_TLBs"].sibling = (o["DTLB_Load"], o["DTLB_Store"],)
o["Memory_Synchronization"].sibling = (o["DTLB_Load"], o["DTLB_Store"],)
o["Irregular_Overhead"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],)
o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Misses"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["Branch_Misprediction_Cost"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],)
o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
| 274,697 | Python | .py | 6,251 | 37.977764 | 1,910 | 0.656158 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,886 | tl_cpu.py | andikleen_pmu-tools/tl_cpu.py | # Copyright (c) 2012-2020, Intel Corporation
# Author: Andi Kleen
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# toplev CPU detection
#
# Environment variables to override (mainly for testing):
# FORCECPU=cpu Force CPU type (e.g. skl). Also --force-cpu
# FORCECOUNTERS=n Force number of generic counters
# FORCEHT=0/1 Force SMT mode
# HYPERVISOR=0/1 Force hypervisor mode (also --force-hypervisor)
# CPUINFO=file Read cpu information from file instead of /proc/cpuinfo. Also --force-cpuinfo
# REDUCED_COUNTERS=n Reduce counters by n
#
from collections import defaultdict, Counter
import os
import re
import glob
import sys
if sys.version_info.major == 3:
import typing # noqa
from typing import Set, List, Dict, Tuple # noqa
# Map of (model number,) tuples to a more specific model-id string, used
# further down to refine the reported CPU name when one model number covers
# several marketing names (Kaby Lake Refresh vs Coffee Lake).
modelid_map = {
    (0x8e, ): "KBLR",
    (0x9e, ): "CFL",
}
# CPUs that expose 8 generic counters per thread (see the counter setup in
# CPU.__init__, which assigns counter lists 0-7 for these).
cpus_8gpc = set(["icl", "tgl", "icx", "spr", "sprmax"]) # XXX handle hybrid ADL
def num_offline_cpus():
    """Return how many logical CPUs are currently offlined in sysfs."""
    offlined = 0
    for path in glob.glob("/sys/devices/system/cpu/cpu[0-9]*/online"):
        with open(path, "r") as fh:
            if int(fh.read()) == 0:
                offlined += 1
    return offlined
def reduced_counters():
    """Number of generic counters to subtract from the usable budget.

    The REDUCED_COUNTERS environment variable overrides the probe.
    Otherwise check allow_tsx_force_abort in sysfs: when it reads 0 one
    counter is reserved (presumably by the TSX force-abort errata
    workaround — inferred from the sysfs file name).
    """
    override = os.getenv("REDUCED_COUNTERS")
    if override:
        return int(override)
    sysfs = "/sys/devices/cpu/allow_tsx_force_abort"
    allow = 1
    if os.path.exists(sysfs):
        with open(sysfs, "r") as fh:
            allow = int(fh.read())
    return 0 if allow else 1
class Env(object):
    """Snapshot of the testing-override environment variables.

    Captures FORCECPU, FORCECOUNTERS, FORCEHT, HYPERVISOR and CPUINFO at
    construction time (each attribute is None when the variable is unset).
    """

    def __init__(self):
        for attr, var in (("forcecpu", "FORCECPU"),
                          ("forcecounters", "FORCECOUNTERS"),
                          ("forceht", "FORCEHT"),
                          ("hypervisor", "HYPERVISOR"),
                          ("cpuinfo", "CPUINFO")):
            setattr(self, attr, os.getenv(var))
class CPU(object):
    """Detect the CPU: model, core/socket topology, SMT state and the
    available generic-counter budget.

    Parses /proc/cpuinfo (or the file named by the CPUINFO override) and
    honors the FORCECPU/FORCEHT/FORCECOUNTERS/HYPERVISOR test overrides
    supplied through an Env object.
    """
    # overrides for easy regression tests
    def force_cpu(self, known_cpus):
        # Apply the FORCECPU override. Returns True when an override was
        # requested, even if it matched nothing (self.cpu is then left "").
        force = self.env.forcecpu
        if not force:
            return False
        self.cpu = ""
        for i in known_cpus:
            if force == i[0]:
                self.cpu = i[0]
                break
        if not self.cpu:
            # BUGFIX: was `if self.cpu is None:`, which could never be true
            # because self.cpu is initialized to "" above, so an unknown
            # FORCECPU value was silently accepted without this warning.
            print("Unknown FORCECPU ",force)
        return True
    def force_ht(self):
        # Apply the FORCEHT override. Returns True when it was set.
        ht = self.env.forceht
        if ht:
            self.ht = True if int(ht) else False
            return True
        return False
    def __init__(self, known_cpus=(), nocheck=False, env=Env()):
        # NOTE(review): the Env() default argument is evaluated once at
        # class-definition (import) time, so the environment is
        # snapshotted then; pass env explicitly to re-read it.
        self.vendor = ""
        self.env = env
        self.model = 0
        self.cpu = ""
        self.realcpu = "simple"
        self.ht = False
        self.counters = {} # type: Dict[str,int]
        self.has_tsx = False
        self.hypervisor = False
        self.force_hypervisor = False
        if self.env.hypervisor:
            self.hypervisor = True
            self.force_hypervisor = True
        self.freq = 0.0
        self.threads = 0
        forced_cpu = self.force_cpu(known_cpus)
        forced_ht = self.force_ht()
        cores = Counter() # type: typing.Counter[Tuple[int,int]]
        sockets = Counter() # type: typing.Counter[int]
        self.coreids = defaultdict(list)
        self.cputocore = {}
        self.cputothread = {}
        self.sockettocpus = defaultdict(list)
        self.cputosocket = {}
        self.allcpus = []
        self.step = 0
        self.name = ""
        cpuinfo = self.env.cpuinfo
        if cpuinfo is None:
            cpuinfo = "/proc/cpuinfo"
        with open(cpuinfo, "r") as f:
            # Parse cpuinfo line by line; `seen` records which of the
            # expected fields showed up so only a complete parse is trusted.
            seen = set()
            for l in f:
                n = l.split()
                if len(n) < 3:
                    continue
                if n[0] == 'processor':
                    seen.add("processor")
                    cpunum = int(n[2])
                    self.allcpus.append(cpunum)
                elif (n[0], n[2]) == ("vendor_id", "GenuineIntel"):
                    self.vendor = n[2]
                    seen.add("vendor_id")
                elif (len(n) > 3 and
                      (n[0], n[1], n[3]) == ("cpu", "family", "6")):
                    seen.add("cpu family")
                elif (n[0], n[1]) == ("model", ":"):
                    seen.add("model")
                    self.model = int(n[2])
                elif (n[0], n[1]) == ("model", "name"):
                    seen.add("model name")
                    m = re.search(r"@ (\d+\.\d+)GHz", l)
                    if m:
                        self.freq = float(m.group(1))
                    self.name = " ".join(n[3:])
                elif (n[0], n[1]) == ("physical", "id"):
                    # assumes the `processor` line came first so cpunum is set
                    physid = int(n[3])
                    sockets[physid] += 1
                    self.sockettocpus[physid].append(cpunum)
                    self.cputosocket[cpunum] = physid
                elif (n[0], n[1]) == ("core", "id"):
                    coreid = int(n[3])
                    key = (physid, coreid,)
                    cores[key] += 1
                    self.threads = max(self.threads, cores[key])
                    if self.threads > 1 and not forced_ht:
                        self.ht = True
                    self.coreids[key].append(cpunum)
                    self.cputocore[cpunum] = key
                    self.cputothread[cpunum] = self.coreids[key].index(cpunum)
                elif n[0] == "flags":
                    seen.add("flags")
                    self.has_tsx = "rtm" in n
                    if "hypervisor" in n:
                        self.hypervisor = True
                elif n[0] == "stepping":
                    seen.add("stepping")
                    self.step = int(n[2])
            # Only map the model number to a known CPU name when all of the
            # expected cpuinfo fields were present.
            if len(seen) >= 7:
                for i in known_cpus:
                    if self.model in i[1] or (self.model, self.step) in i[1]:
                        self.realcpu = i[0]
                        if not forced_cpu:
                            self.cpu = i[0]
                        break
            self.force_counters()
            # Default counter layout; refined per CPU family below.
            self.limit4_counters = { "cpu": "none" }
            self.standard_counters = { "cpu": tuple(("0,1,2,3",)) }
            if self.cpu.startswith("adl") or self.cpu.startswith("mtl"):
                atom_counters = 6 if self.cpu.startswith("adl") else 8
                newcounters = {
                    "cpu_core": 8,
                    "cpu": 4,
                    "cpu_atom": atom_counters,
                }
                self.standard_counters = {
                        "cpu_core": ("0,1,2,3,4,5,6,7", "0,1,2,3", ),
                        "cpu": ("0,1,2,3,4,5,6,7", "0,1,2,3", ),
                        # BUGFIX: dropped the stray trailing comma that was
                        # inside the "0,1,2,3,4,5,6,7," counter-list string,
                        # inconsistent with every other counter list here.
                        "cpu_atom": ("0,1,2,3,4,5", ) if atom_counters == 6 else ("0,1,2,3,4,5,6,7", )
                }
                self.limit4_counters = { "cpu_core": "0,1,2,3", "cpu_atom": "none",
                                         "cpu": "0,1,2,3" }
            elif self.cpu == "slm":
                newcounters = { "cpu": 2 }
                self.standard_counters = { "cpu": ("0,1",) }
            # when running in a hypervisor always assume worst case HT is on
            # also when CPUs are offline assume SMT is on
            elif self.ht or self.hypervisor or (num_offline_cpus() > 0 and not nocheck) or self.cpu in cpus_8gpc:
                if self.cpu in cpus_8gpc or (self.cpu == "simple" and self.realcpu in cpus_8gpc):
                    newcounters = {"cpu": 8 }
                    self.standard_counters = { "cpu": ("0,1,2,3,4,5,6,7", "0,1,2,3", ) }
                    self.limit4_counters = { "cpu": "0,1,2,3" }
                else:
                    newcounters = { "cpu": 4 }
            elif self.cpu == "ehl":
                newcounters = { "cpu": 4 }
            else:
                newcounters = { "cpu": 8 }
            if not self.counters:
                self.counters = newcounters
            if not nocheck and not self.env.forcecounters:
                # subtract counters stolen by the TSX force-abort workaround
                for j in ("cpu", "cpu_core", "cpu_atom"):
                    if j in self.counters:
                        self.counters[j] -= reduced_counters()
            if self.cpu in cpus_8gpc:
                self.standard_counters = { "cpu": ("0,1,2,3,4,5,6,7", "0,1,2,3", ) }
                self.limit4_counters = { "cpu": "0,1,2,3" }
            if self.cpu == "spr" and "Max" in self.name:
                self.cpu = "sprmax"
            try:
                # BUGFIX: use a context manager so the sysfs file handle is
                # not leaked (was a bare open() with no close()).
                with open("/sys/devices/cpu/caps/pmu_name") as pf:
                    self.pmu_name = pf.read().strip()
            except IOError:
                self.pmu_name = ""
            self.sockets = len(sockets.keys())
            self.modelid = None
            mid = (self.model,)
            self.true_name = self.cpu
            if mid in modelid_map:
                self.modelid = modelid_map[mid]
                self.true_name = self.modelid.lower()
            # XXX match steppings here too
    def force_counters(self):
        # Apply the FORCECOUNTERS override to every PMU this CPU exposes
        # (hybrid adl/mtl have separate core and atom PMUs).
        cnt = self.env.forcecounters
        if cnt:
            cntn = int(cnt)
            if self.realcpu == "adl" or self.realcpu == "mtl":
                self.counters = {
                    "cpu_core": cntn,
                    "cpu_atom": cntn,
                    "cpu": cntn }
            else:
                self.counters = { "cpu": cntn }
| 9,244 | Python | .py | 234 | 28.047009 | 109 | 0.506336 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,887 | gen_level.py | andikleen_pmu-tools/gen_level.py | # generate levels for events from the model
# utility module for other tools
# the four top-level topdown nodes
l1 = {"Frontend_Bound", "Backend_Bound", "Retiring", "Bad_Speculation"}

def get_level(name):
    """Return the 1-based tree level of a topdown node name.

    Top-level nodes are the members of l1; deeper nodes contain dots.
    Anything else is a metric and gets level 0.
    """
    if name in l1 or "." in name:
        return name.count(".") + 1
    return 0
def is_metric(name):
    """True when name denotes a metric rather than a tree node."""
    return not get_level(name)
def level_name(name):
    """Return the parent/group name used to categorize a node or metric."""
    if "." in name:
        # parent node: everything up to the last dot
        parent = name.rsplit(".", 1)[0]
    elif is_metric(name):
        return "CPU-METRIC" # XXX split
    else:
        parent = "TopLevel"
    return parent.replace(" ", "_")
| 600 | Python | .py | 21 | 23.619048 | 76 | 0.586806 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,888 | dummyarith.py | andikleen_pmu-tools/dummyarith.py |
# dummy arithmetic type without any errors, for collecting
# the events from the model. Otherwise divisions by zero cause
# early exits
class DummyArith(object):
    """Arithmetic stand-in that absorbs every operation without error.

    Used to walk model formulas purely to collect the referenced
    events: every arithmetic operation yields the same dummy object
    and every comparison is True, so divisions by zero and the like
    never abort the traversal early.
    """
    def __add__(self, other):
        return self

    def __lt__(self, other):
        return True

    # all binary arithmetic (and reflected forms) behaves like __add__
    __sub__ = __mul__ = __div__ = __truediv__ = __add__
    __radd__ = __rsub__ = __rmul__ = __rdiv__ = __rtruediv__ = __add__
    __or__ = __and__ = __min__ = __max__ = __add__

    # every comparison claims True, like __lt__
    __eq__ = __ne__ = __gt__ = __ge__ = __lt__
| 633 | Python | .py | 25 | 20.56 | 62 | 0.455446 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,889 | toplev.py | andikleen_pmu-tools/toplev.py | #!/usr/bin/env python3
# Copyright (c) 2012-2022, Intel Corporation
# Author: Andi Kleen
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Measure a workload using the topdown performance model:
# estimate on which part of the CPU pipeline it bottlenecks.
#
# Must find ocperf in python module path.
# Handles a variety of perf and kernel versions, but older ones have various
# limitations.
# Environment variables for overrides (see also tl_cpu/ocperf):
# TOPOLOGY=file Read sysfs topology from file. Also --force-topology
# PERF=exe Force perf binary to run
# FORCE_NMI_WATCHDOG=1 Force NMI watchdog mode
# KERNEL_VERSION=... Force kernel version (e.g. 5.0)
# FORCEMETRICS=1 Force fixed metrics and slots
# TLSEED=n Set seed for --subset sample: sampling
# DURATION_TIME=0 Force not using duration_time
from __future__ import print_function, division
import sys
import os
import re
import textwrap
import platform
import pty
import subprocess
import argparse
import time
import types
import csv
import bisect
import random
import json
import io
import glob
from dummyarith import DummyArith
from copy import copy
from fnmatch import fnmatch
from math import isnan
from collections import defaultdict, Counter, OrderedDict
from itertools import compress, groupby, chain
from listutils import cat_unique, dedup, filternot, not_list, append_dict, \
zip_longest, flatten, findprefix, dummy_dict
from objutils import has, safe_ref, map_fields, ref_or
from tl_stat import ComputeStat, ValStat, combine_valstat
import tl_cpu
import tl_output
import ocperf
import event_download
from tl_uval import UVal
from tl_io import flex_open_r, flex_open_w, popentext, warn, warn_once, \
warn_once_no_assert, print_once, \
obj_debug_print, debug_print, warn_no_assert, \
set_args as io_set_args
if sys.version_info.major == 3:
import typing # noqa
from typing import Set, List, Dict, Any, Tuple, DefaultDict # noqa
# map of short CPU code names to the model numbers (or (model, stepping)
# tuples) that identify them from /proc/cpuinfo
known_cpus = (
    ("snb", (42, )),
    ("jkt", (45, )),
    ("ivb", (58, )),
    ("ivt", (62, )),
    ("hsw", (60, 70, 69 )),
    ("hsx", (63, )),
    ("slm", (55, 77, 76, )),
    ("bdw", (61, 71, )),
    ("bdx", (79, 86, )),
    ("simple", ()),
    ("skl", (94, 78, 142, 158, 165, 166, )),
    ("knl", (87, )),
    ("skx", ((85, 4,), )),
    ("clx", ((85, 5,), (85, 6,), (85, 7,), (85, 8,), (85, 9,), (85, 10,), )),
    ("icl", (126, 125, 157,
             167, )), # RKL as ICL for now
    ("tgl", (140, 141, )),
    ("icx", (106, 108, )),
    ("adl", (154, 151,
             183, )), # RPL as ADL
    ("adl-glc", (154, )),
    ("adl-grt", (154, )),
    ("spr", (143, )),
    ("ehl", (150, )),
    ("sprmax", ()),
    ("mtl", (170, 186, )),
    ("mtl-cmt", (170, 186, )),
    ("mtl-rwc", (170, 186, )),
)

# optional aliases for downloaded event list names
eventlist_alias = {
} # type: Dict[str,str]

# CPUs that support TSX transactional memory events
tsx_cpus = ("hsw", "hsx", "bdw", "skl", "skx", "clx", "icl", "icx",
            "spr", "sprmax")

# hybrid (big.LITTLE style) CPUs and their atom-only variants
hybrid_cpus = ("adl", "mtl",)
atom_hybrid_cpus = ("adl-grt", "mtl-cmt",)

# events that never come from the downloaded JSON event lists
non_json_events = set(("dummy", "duration_time"))

tma_mgroups = set() # type: Set[str]

# tunables (tunable with --tune)
DEDUP_AREA = "Info.Bot*"
DEDUP_NODE = ""
BOTTLENECK_LEVEL_INC = 1
IDLE_MARKER_THRESHOLD = 0.05
SIB_THRESH = 0.05
PERF_SKIP_WINDOW = 15
KEEP_UNREF = False
INAME = False
FUZZYINPUT = False

# handle kernels that don't support all events
unsup_pebs = (
    ("BR_MISP_RETIRED.ALL_BRANCHES:pp", (("hsw",), (3, 18), None)),
    ("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM:pp", (("hsw",), (3, 18), None)),
    ("MEM_LOAD_UOPS_RETIRED.L3_MISS:pp", (("hsw",), (3, 18), None)),
)

ivb_ht_39 = (("ivb", "ivt"), (4, 1), (3, 9))
# uncomment if you removed commit 741a698f420c3 from kernel
#ivb_ht_39 = ((), None, None)

# both kernel bugs and first time a core was supported
# disable events if the kernel does not support them properly
# this does not handle backports (override with --force-events)
unsup_events = (
    # commit 36bbb2f2988a29
    ("OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE", (("hsw", "hsx"), (3, 18), None)),
    # commit 741a698f420c3 broke it, commit e979121b1b and later fixed it
    ("MEM_LOAD_UOPS_L*_HIT_RETIRED.*", ivb_ht_39),
    ("MEM_LOAD_UOPS_RETIRED.*", ivb_ht_39),
    ("MEM_LOAD_UOPS_L*_MISS_RETIRED.*", ivb_ht_39),
    ("MEM_UOPS_RETIRED.*", ivb_ht_39),
    # commit 5e176213a6b2bc
    # the event works, but it cannot put into the same group as
    # any other CYCLE_ACTIVITY.* event. For now black list, but
    # could also special case this in the group scheduler.
    ("CYCLE_ACTIVITY.STALLS_TOTAL", (("bdw", (4, 4), None))),
    # commit 91f1b70582c62576
    ("CYCLE_ACTIVITY.*", (("bdw"), (4, 1), None)),
    ("L1D_PEND_MISS.PENDING", (("bdw"), (4, 1), None)),
    # commit 6113af14c8
    ("CYCLE_ACTIVITY:CYCLES_LDM_PENDING", (("ivb", "ivt"), (3, 12), None)),
    # commit f8378f52596477
    ("CYCLE_ACTIVITY.*", (("snb", "jkt"), (3, 9), None)),
    # commit 0499bd867bd17c (ULT) or commit 3a632cb229bfb18 (other)
    # technically most haswells are 3.10, but ULT is 3.11
    ("L1D_PEND_MISS.PENDING", (("hsw",), (3, 11), None)),
    ("L1D_PEND_MISS.PENDING", (("hsx"), (3, 10), None)),
    # commit c420f19b9cdc
    ("CYCLE_ACTIVITY.*_L1D_PENDING", (("hsw", "hsx"), (4, 1), None)),
    ("CYCLE_ACTIVITY.CYCLES_NO_EXECUTE", (("hsw", "hsx"), (4, 1), None)),
    # commit 3a632cb229b
    ("CYCLE_ACTIVITY.*", (("hsw", "hsx"), (3, 11), None)))

# pseudo counter-index bases: fixed counters and topdown metric slots
FIXED_BASE = 50
METRICS_BASE = 100
SPECIAL_END = 130

# event name -> pseudo counter index the event is restricted to
limited_counters_base = {
    "instructions": FIXED_BASE + 0,
    "cycles": FIXED_BASE + 1,
    "ref-cycles": FIXED_BASE + 2,
    "slots": FIXED_BASE + 3,
    "cpu/slots/": FIXED_BASE + 3,
    "cpu_core/slots/": FIXED_BASE + 3,
    "topdown.slots": FIXED_BASE + 3,
    "cpu_core/topdown-fe-bound/": METRICS_BASE + 0,
    "cpu/topdown-fe-bound/": METRICS_BASE + 0,
    "cpu_core/topdown-be-bound/": METRICS_BASE + 1,
    "cpu/topdown-be-bound/": METRICS_BASE + 1,
    "cpu_core/topdown-bad-spec/": METRICS_BASE + 2,
    "cpu/topdown-bad-spec/": METRICS_BASE + 2,
    "cpu_core/topdown-retiring/": METRICS_BASE + 3,
    "cpu/topdown-retiring/": METRICS_BASE + 3,
    "cpu_core/topdown-heavy-ops/": METRICS_BASE + 4,
    "cpu/topdown-heavy-ops/": METRICS_BASE + 4,
    "cpu/topdown-br-mispredict/": METRICS_BASE + 5,
    "cpu_core/topdown-br-mispredict/": METRICS_BASE + 5,
    "cpu_core/topdown-mem-bound/": METRICS_BASE + 6,
    "cpu/topdown-mem-bound/": METRICS_BASE + 6,
    "cpu/topdown-fetch-lat/": METRICS_BASE + 7,
    "cpu_core/topdown-fetch-lat/": METRICS_BASE + 7,
    "cpu/cycles-ct/": 2,
    "cpu_core/cycles-ct/": 2,
}

# limited events that can also be scheduled on a generic counter
promotable_limited = set((
    "instructions",
    "cycles",
    "slots",
    "cpu/slots/",
    "cpu_core/slots/")
)

Undef = UVal("undef", 0)

# should clean those up, but they're all read-only after initialization anyways
global smt_mode
global output_numcpus
global metrics_own_group
global run_l1_parallel
class EventContextBase(object):
    """Base container for per-target-CPU event scheduling state."""
    def __init__(self):
        self.constraint_fixes = {}
        self.constraint_patterns = []
        self.errata_whitelist = []
        # events always scheduled outside groups
        self.outgroup_events = set()
        # events the scheduler does not count against group size
        self.sched_ignore_events = set()
        self.require_pebs_events = set()
        self.core_domains = set()
        # event name -> (pseudo) counter index the event is limited to
        self.limited_counters = {}
        self.limited_set = set()
        self.fixed_events = set()
        self.errata_events = {}
        self.errata_warn_events = {}
        # events restricted to the first four counters (ICL+)
        self.limit4_events = set()
        self.notfound_cache = {}
        self.rmap_cache = {}
        self.slots_available = False
        self.standard_counters = tuple("") # type: Tuple[str, ...]
        self.counters = 0
        self.limit4_counters = ""
        self.force_metrics = False
        self.metrics_override = False
    def init_emap(self, emap):
        # event map used to resolve symbolic event names
        self.emap = emap
class EventContext(EventContextBase):
    """Event related context for a given target CPU."""
    def __init__(self, pmu):
        EventContextBase.__init__(self)
        self.outgroup_events = {"dummy", "duration_time", "msr/tsc/"}
        self.core_domains = {"Slots", "CoreClocks", "CoreMetric",
                             "Core_Execution", "Core_Clocks", "Core_Metric"}
        self.limited_counters = dict(limited_counters_base)
        self.limited_set = set(self.limited_counters)
        # fixed/pseudo events occupy the FIXED_BASE..SPECIAL_END index range
        self.fixed_events = {ev for ev, idx in self.limited_counters.items()
                             if FIXED_BASE <= idx <= SPECIAL_END}
        # fall back to the generic "cpu" pmu when the requested pmu is
        # missing from any of the per-pmu tables
        tables = (cpu.counters, cpu.standard_counters, cpu.limit4_counters)
        if pmu is None or any(pmu not in t for t in tables):
            pmu = "cpu"
        self.standard_counters = cpu.standard_counters[pmu]
        self.counters = cpu.counters[pmu]
        self.limit4_counters = cpu.limit4_counters[pmu]
smt_mode = False  # global SMT accounting flag; presumably set during CPU detection -- verify writer
def works(x):
    """Return True when shell command x exits successfully,
    with all of its output discarded."""
    status = os.system(x + " >/dev/null 2>/dev/null")
    return status == 0
exists_cache = {} # type: Dict[str,bool]

def cached_exists(fn):
    """os.path.exists(fn) with results memoized in exists_cache."""
    try:
        return exists_cache[fn]
    except KeyError:
        result = os.path.exists(fn)
        exists_cache[fn] = result
        return result
def safe_int(x):
    """Parse x as an int, returning 0 when it is not a valid integer."""
    try:
        value = int(x)
    except ValueError:
        value = 0
    return value
def event_nocheck():
    """Truthy when event/PMU existence checks should be skipped
    (importing previously collected data, or --no-check given)."""
    if args.import_:
        return args.import_
    return args.no_check
class PerfFeatures(object):
    """Adapt to the quirks of various perf versions."""
    def __init__(self, args):
        # probe capabilities on the core pmu for hybrid systems
        pmu = "cpu"
        if os.path.exists("/sys/devices/cpu_core"):
            pmu = "cpu_core"
        # PERF environment variable overrides the binary to run
        p = os.getenv("PERF")
        self.perf = p if p else "perf"
        ret = os.system(self.perf + " stat --log-fd 3 3>/dev/null true")
        if ret:
            # work around the insane perf setup on Debian derivates
            # it fails if the perf isn't the same as the kernel
            # look for the underlying perf installs, if any
            # perf is compatible, so just pick the newest
            if ret == 512:
                l = sorted(glob.glob("/usr/lib/linux-tools*/perf"),
                           key=lambda x: [int(t) if t.isdigit() else t for t in re.split(r'(\d+)', x)])
                if len(l) > 0:
                    self.perf = l[0]
                    ret = os.system(self.perf + " stat --log-fd 3 3>/dev/null true")
            if ret:
                sys.exit("perf binary is too old/not installed or perf is disabled in /proc/sys/kernel/perf_event_paranoid")
        self.logfd_supported = ret == 0
        self.supports_power = (
            not args.no_uncore
            and not args.force_hypervisor
            and os.path.exists("/sys/devices/power/events/energy-cores"))
        # parse "perf version X.Y..." into a (major, minor) tuple
        with os.popen(self.perf + " --version") as f:
            v = f.readline().split()
        perf_version = tuple(map(safe_int, v[2].split(".")[:2])) if len(v) >= 3 else (0,0)
        self.supports_percore = (perf_version >= (5,7) or
                                 works(self.perf + " stat --percore-show-thread true"))
        # DURATION_TIME environment variable can force the probe result
        dt = os.getenv("DURATION_TIME")
        if dt:
            self.supports_duration_time = int(dt)
        else:
            self.supports_duration_time = (perf_version >= (5,2) or
                                           works(self.perf + " stat -e duration_time true"))
        # guests don't support offcore response
        if event_nocheck():
            self.has_max_precise = True
            self.max_precise = 3
        else:
            self.has_max_precise = os.path.exists("/sys/devices/%s/caps/max_precise" % pmu)
            if self.has_max_precise:
                self.max_precise = int(open("/sys/devices/%s/caps/max_precise" % pmu).read())
        if args.exclusive and not args.print and not (perf_version >= (5,10) or works(self.perf + " stat -e '{branches,branches,branches,branches}:e' true")):
            sys.exit("perf binary does not support :e exclusive modifier")
def kv_to_key(v):
    """Fold a (major, minor) kernel version pair into one comparable int."""
    major, minor = v[0], v[1]
    return 100 * major + minor
def unsup_event(e, table, kernel_version, min_kernel=None):
    """Check whether event e is unsupported per table on this cpu/kernel.

    table rows are (event-pattern, (cpu-list, min-version, max-version)).
    Returns True when the running kernel is older than the minimum
    version of the matching entry; the required minimum is appended to
    min_kernel for error reporting.
    """
    # match on the base event name, without modifiers
    if ":" in e:
        e = e[:e.find(":")]
    for j in table:
        if fnmatch(e, j[0]) and cpu.realcpu in j[1][0]:
            break
    else:
        # no entry matches: the event is fine
        return False
    v = j[1]
    if v[1] and kernel_version < kv_to_key(v[1]):
        # NOTE(review): "if min_kernel:" is False for an empty list, so
        # an empty list passed by the caller is never appended to --
        # confirm whether "is not None" was intended.
        if min_kernel:
            min_kernel.append(v[1])
        return True
    # NOTE(review): this branch and the fallthrough both return False,
    # so the max-version field (v[2]) has no effect here -- confirm
    # against upstream intent for version-window breakages.
    if v[2] and kernel_version >= kv_to_key(v[2]):
        return False
    return False
def remove_qual(ev):
    """Strip u/k privilege qualifiers from event ev, keeping a leading
    'p' (precise) qualifier when one is present."""
    def make_repl(prefix):
        def repl(m):
            precise = m.group(1)
            return prefix + precise if precise else prefix
        return repl
    ev = re.sub(r'/(p?)[ku]+', make_repl("/"), ev)
    return re.sub(r':(p?)[ku]+', make_repl(""), ev)
def limited_overflow(evlist, num):
    """Check whether evlist oversubscribes fixed/limited counters.

    Returns (overflowed, extra): overflowed is True when some limited
    counter is requested more than once and cannot be promoted to a
    generic counter; extra is the number of additional generic
    counters consumed by promoted events.

    NOTE(review): gen_overflow is called with counter indexes (ints)
    while promotable_limited holds event names (strings), so the
    promotion path looks unreachable; the num parameter is also
    unused -- confirm against upstream intent.
    """
    class GenericCounters:
        def __init__(self):
            self.num = 0
    def gen_overflow(c, gc, inc):
        # promotable events may fall back to generic counters
        if c in promotable_limited:
            gc.num += inc - 1
            return False
        return True
    # count how often each limited counter slot is requested
    assigned = Counter([ectx.limited_counters[remove_qual(x)] for x in evlist if remove_qual(x) in ectx.limited_counters])
    gc = GenericCounters()
    return any([x > 1 and gen_overflow(k, gc, x) for k, x in assigned.items()]), gc.num
# we limit to 3 events because one could be taken up by the nmi watchdog
# and also there's some kernel issue that sometimes only 3 fit on ICL
LIMIT4_MAX_EVENTS = 3

# limited to first four counters on ICL+
def limit4_overflow(evlist):
    """True when more counter-0..3 limited events are listed than fit."""
    limited = [ev for ev in evlist if remove_qual(ev) in ectx.limit4_events]
    return len(limited) > LIMIT4_MAX_EVENTS
def ismetric(x):
    """True for perf topdown metric pseudo-events, with any pmu prefix."""
    prefixes = ("topdown-", "cpu_core/topdown-", "cpu/topdown-")
    return any(x.startswith(p) for p in prefixes)
# shared PMU resources that only a limited number of events can use at once
resources = ("frontend=", "offcore_rsp=", "ldlat=", "in_tx_cp=", "cycles-ct")

def event_to_resource(ev):
    """Return the shared PMU resource used by event ev, or "" for none."""
    # NOTE(review): remove_qual on the resource names themselves is a
    # no-op for the current table entries
    for res in resources:
        if remove_qual(res) in ev:
            return res
    return ""
def resource_split(evlist):
    """True when some shared PMU resource is oversubscribed in evlist
    (offcore_rsp supports two concurrent users, everything else one)."""
    usage = Counter(event_to_resource(ev) for ev in evlist)
    for res, count in usage.items():
        if not res:
            continue
        limit = 2 if res == "offcore_rsp=" else 1
        if count > limit:
            return True
    return False
def num_generic_counters(evset):
    """Number of events in evset that occupy a generic counter."""
    # XXX does not handle formulas having different u/k qualifiers, but we would need to fix the
    # callers to be consistent to handle that
    excluded = (set(add_filter(ectx.fixed_events))
                | ectx.fixed_events
                | ectx.outgroup_events
                | ectx.sched_ignore_events)
    return len(evset - excluded)
FORCE_SPLIT = 100  # sentinel counter count that forces a group split

# Force metrics into own group
metrics_own_group = True
def is_slots(x):
    """True if x is the slots event, optionally pmu-qualified."""
    m = re.match(r'(cpu/|cpu_core/)?slots[,/]', x)
    return m is not None
def needed_counters(evlist):
    """Return the number of generic counters evlist needs, or
    FORCE_SPLIT when the list must be split into multiple groups."""
    evset = set(evlist)
    num = num_generic_counters(evset)
    metrics = [ismetric(x) for x in evlist]
    slots = [is_slots(x) for x in evlist]
    if any(metrics) or any(slots):
        # slots must be first if metrics are present
        if any(map(is_slots, evlist)) and not is_slots(evlist[0]):
            debug_print("split for slots %s" % evlist)
            return FORCE_SPLIT
        # force split if there are other events.
        if metrics_own_group and len(evlist) > sum(metrics) + 1:
            debug_print("split for other events in topdown %s" % evlist)
            return FORCE_SPLIT
    # split if any resource is oversubscribed
    if resource_split(evlist):
        debug_print("resource split %s" % evlist)
        return FORCE_SPLIT
    # topdown metrics do not use generic counters
    evlist = list(compress(evlist, not_list(metrics)))
    # force split if we overflow fixed or limited
    l_over, numg = limited_overflow(evlist, num)
    if l_over:
        debug_print("split for limited overflow %s " % evlist)
        return FORCE_SPLIT
    if limit4_overflow(evlist):
        debug_print("split for limit4 overflow %s" % evlist)
        return FORCE_SPLIT
    return num + numg
def event_group(evlist):
    """Format evlist as a perf event string, wrapping schedulable runs
    in {} groups and applying the modifier options (:W/:e/:G/:H/:D)."""
    evlist = add_filter(evlist)
    l = [] # type: List[str]
    pgroup = False
    # partition into runs of in-group vs out-of-group events
    for is_og, g in groupby(evlist, lambda x: x in ectx.outgroup_events):
        gl = list(g)
        slots_or_metric = [ismetric(x) or is_slots(x) for x in gl]
        # keep the groups for slots or metric because of some kernel
        # requirements and also some perf versions reorder slots with no group.
        if is_og or (args.no_group and not any(slots_or_metric)):
            l += gl
        else:
            e = ",".join(gl)
            e = "{%s}" % e
            if args.exclusive or args.pinned or args.weak or args.host or args.guest:
                e += ":"
                if args.weak:
                    e += "W"
                if args.exclusive:
                    e += "e"
                if args.guest:
                    e += "G"
                if args.host:
                    e += "H"
                elif args.pinned:
                    # pinned: only the single slots+metrics group gets :D
                    if all(slots_or_metric):
                        e += "D"
                        assert pgroup is False
                        assert is_slots(gl[0])
                        pgroup = True
                    else:
                        assert not any(slots_or_metric)
            l.append(e)
    return ",".join(l)
def exe_dir():
    """Directory containing this script, symlinks resolved; '.' fallback."""
    path = os.path.realpath(sys.argv[0])
    directory = os.path.dirname(path)
    return directory if directory else "."
def add_args(rest, *args):
    """Return rest with any of args not already present prepended."""
    missing = []
    for candidate in args:
        if candidate not in rest:
            missing.append(candidate)
    return missing + rest
def update_arg(arg, flag, sep, newval):
    """Replace the value of an existing flag in argument list arg.

    Handles both the separate "--flag value" form and the combined
    "--flag=value" form (sep is the separator, e.g. "=").
    Returns True when the flag was found and updated, else False.
    """
    i = findprefix(arg, flag, "--")
    if i >= 0:
        if arg[i] == flag:
            # separate form: the value is the following argument
            arg[i+1] = newval
        else:
            # combined form: rewrite the whole argument in place
            arg[i] = flag + sep + newval
        return True
    return False
def del_arg_val(arg, flag):
    """Remove flag (and its value, for the separate two-argument form)
    from argument list arg.

    NOTE(review): assumes the flag is present; a findprefix result of
    -1 would delete from the end of the list -- confirm callers
    guarantee presence.
    """
    i = findprefix(arg, flag, "--")
    del arg[i:i+2 if arg[i] == flag else i+1]
def init_args():
    """Parse toplev's own command line.

    Returns (args, rest): args holds toplev's options, rest the
    remaining arguments that are passed through to perf. Also applies
    --setvar environment overrides and hands args to the io layer.
    """
    p = argparse.ArgumentParser(usage='toplev [options] perf-arguments',
                description='''
Estimate on which part of the CPU pipeline a workload bottlenecks using the TopDown model.
The bottlenecks are expressed as a tree with different levels.
Requires a modern Intel CPU.
Examples:
toplev -l1 --single-thread program
measure single threaded program. On hyper threaded systems with
Skylake or older the system should be idle.
toplev -NB program
Measure program showing consolidated bottleneck view and extra
information associated with bottlenecks. Note this will multiplex
performance counters, so there may be measuring errors.
toplev -NB --run-sample program
Measure programing showing bottlenecks and extra nodes, and
automatically sample for the location of bottlenecks in a second
pass.
toplev --drilldown --only-bottleneck program
Rerun workload with minimal multiplexing until critical bottleneck
is found. Only print critical bottleneck
toplev -l3 --no-desc -I 100 -x, sleep X
measure whole system for X seconds every 100ms, outputting in CSV format.
toplev --all --core C0 taskset -c 0,1 program
Measure program running on core 0 with all nodes and metrics enables
toplev --all --xlsx x.xlsx -a sleep 10
Generate spreadsheet with full system measurement for 10 seconds
''', epilog='''
Other perf arguments allowed (see the perf documentation)
After -- perf arguments conflicting with toplev can be used.
Some caveats:
toplev defaults to measuring the full system and show data
for all CPUs. Use taskset to limit the workload to known CPUs if needed.
In some cases (idle system, single threaded workload) --single-thread
can also be used to get less output.
The lower levels of the measurement tree are less reliable
than the higher levels.  They also rely on counter multi-plexing,
and can not run each equation in a single group, which can cause larger
measurement errors with non steady state workloads
(If you don't understand this terminology; it means measurements
in higher levels are less accurate and it works best with programs that primarily
do the same thing over and over)
If the program is very reproducible -- such as a simple kernel --
it is also possible to use --no-multiplex. In this case the
workload is rerun multiple times until all data is collected.
Only use with sleep if the workload is running in a steady state.
With the --drilldown option toplev can automatically remeasure the workload
with only the nodes needed to measure the particular bottlenecks
This also requires a reproducible or steady-state workload.
toplev needs a new enough perf tool and has specific requirements on
the kernel. See http://github.com/andikleen/pmu-tools/wiki/toplev-kernel-support.''',
                formatter_class=argparse.RawDescriptionHelpFormatter)
    g = p.add_argument_group('General operation')
    g.add_argument('--interval', '-I', help='Measure every ms instead of only once',
                   type=int)
    g.add_argument('--no-multiplex',
                   help='Do not multiplex, but run the workload multiple times as needed. '
                   'Requires reproducible workloads.',
                   action='store_true')
    g.add_argument('--single-thread', '-S', help='Measure workload as single thread. Workload must run single threaded. '
                   'In SMT mode other thread must be idle.', action='store_true')
    g.add_argument('--fast', '-F', help='Skip sanity checks to optimize CPU consumption', action='store_true')
    g.add_argument('--import', help='Import specified perf stat output file instead of running perf. '
                   'Must be for same cpu, same arguments, same /proc/cpuinfo, same topology, unless overriden',
                   dest='import_')
    g.add_argument('--subset', help="Process only a subset of the input file with --import. "
                   "Valid syntax: a-b. Process from seek offset a to b. b is optional. "
                   "x/n%% process x'th n percent slice. Starts counting at 0. Add - to process to end of input. "
                   "sample:n%% Sample each time stamp in input with n%% (0-100%%) probability. "
                   "toplev will automatically round to the next time stamp boundary.")
    g.add_argument('--parallel',
                   help="Run toplev --import in parallel in N processes, or the system's number of CPUs if 0 is specified",
                   action='store_true')
    g.add_argument('--pjobs', type=int, default=0,
                   help='Number of threads to run with parallel. Default is number of CPUs.')
    g.add_argument('--gen-script', help='Generate script to collect perfmon information for --import later',
                   action='store_true')
    g.add_argument('--script-record', help='Use perf stat record in script for faster recording or '
                   'import generated perf.data (requires new perf)', action='store_true')
    g.add_argument('--drilldown', help='Automatically rerun to get more details on bottleneck', action='store_true')
    g.add_argument('--show-cpu', help='Print current CPU type and exit',
                   action='store_true')
    g = p.add_argument_group('Measurement filtering')
    g.add_argument('--kernel', help='Only measure kernel code', action='store_true')
    g.add_argument('--user', help='Only measure user code', action='store_true')
    g.add_argument('--cpu', '-C', help=argparse.SUPPRESS)
    g.add_argument('--pid', '-p', help=argparse.SUPPRESS)
    g.add_argument('--core', help='Limit output to cores. Comma list of Sx-Cx-Tx. All parts optional.')
    g.add_argument('--no-aggr', '-A', help='Measure every CPU', action='store_true')
    g.add_argument('--cputype', help='Limit to hybrid cpu type (atom or core)', choices=['atom', 'core'])
    g = p.add_argument_group('Select events')
    g.add_argument('--level', '-l', help='Measure upto level N (max 6)',
                   type=int, default=-1)
    g.add_argument('--metrics', '-m', help="Print extra metrics", action='store_true')
    g.add_argument('--sw', help="Measure perf Linux metrics", action='store_true')
    g.add_argument('--no-util', help="Do not measure CPU utilization", action='store_true')
    g.add_argument('--tsx', help="Measure TSX metrics", action='store_true')
    g.add_argument('--all', help="Measure everything available", action='store_true')
    g.add_argument('--frequency', help="Measure frequency", action='store_true')
    g.add_argument('--power', help='Display power metrics', action='store_true')
    g.add_argument('--nodes', help='Include or exclude nodes (with + to add, -|^ to remove, '
                   'comma separated list, wildcards allowed, add * to include all children/siblings, '
                   'add /level to specify highest level node to match, '
                   'add ^ to match related siblings and metrics, '
                   'start with ! to only include specified nodes)')
    g.add_argument('--metric-group', help='Add (+) or remove (-|^) metric groups of metrics, '
                   'comma separated list from --list-metric-groups.', default=None)
    g.add_argument('--areas', help='Add specific areas. Comma separate list, wildcards allowed')
    g.add_argument('--pinned', help='Run topdown metrics (on ICL+) pinned', action='store_true')
    g.add_argument('--exclusive', help='Use exclusive groups. Requires new kernel and new perf', action='store_true')
    g.add_argument('--host', action='store_true', help="Count host only")
    g.add_argument('--guest', action='store_true', help="Count guest only")
    g.add_argument('--weak', action='store_true', help="Use weak groups to work around scheduling problems")
    g.add_argument('--thread',
                   help="Enable per thread SMT measurements for pre-ICL, at the cost of more multiplexing.",
                   action='store_true')
    g.add_argument('--aux', help='Enable auxilliary hierarchy nodes on some models. '
                   'Auxiliary nodes offer alternate views of the same bottleneck component, which can impact observed bottleneck percentage totals',
                   action='store_true')
    g.add_argument('--node-metrics', '-N', help='Add metrics related to selected nodes, but hide when node is not crossing threshold',
                   action='store_true')
    g.add_argument('--bottlenecks', '-B', help='Show bottlenecks view of Bottleneck metrics. Use -l0 to disable standard topdown view.', action='store_true')
    g = p.add_argument_group('Model tunables')
    g.add_argument('--fp16', help='Enable FP16 support in some models', action='store_true')
    g.add_argument('--hbm-only', help='Enable HBM only mode in some models', action='store_true')
    g.add_argument('--ret-latency', help='Read JSON file with Retire latencies. Can specify path inside JSON file with :, for example foo.json:6-cores:MEAN')
    g = p.add_argument_group('Query nodes')
    g.add_argument('--list-metrics', help='List all metrics. Can be followed by prefixes to limit, ^ for full match',
                   action='store_true')
    g.add_argument('--list-nodes', help='List all nodes. Can be followed by prefixes to limit, ^ for full match',
                   action='store_true')
    g.add_argument('--list-metric-groups', help='List metric groups.', action='store_true')
    g.add_argument('--list-all', help='List every supported node/metric/metricgroup. Can be followed by prefixes to limit, ^ for full match.',
                   action='store_true')
    g.add_argument('--describe', help='Print full descriptions for listed node prefixes. Add ^ to require full match.', action='store_true')
    g = p.add_argument_group('Workarounds')
    g.add_argument('--no-group', help='Dont use groups', action='store_true')
    g.add_argument('--force-events', help='Assume kernel supports all events. May give wrong results.',
                   action='store_true')
    g.add_argument('--ignore-errata', help='Do not disable events with errata', action='store_true', default=True)
    g.add_argument('--handle-errata', help='Disable events with errata', action='store_true')
    g.add_argument('--reserved-counters', default=0, help='Assume N generic counters are used elsewhere', type=int)
    g = p.add_argument_group('Filtering output')
    g.add_argument('--only-bottleneck', help='Only print topdown tree bottleneck and associated metrics (unless overriden with other options like --nodes or --bottleneck)', action='store_true')
    g.add_argument('--verbose', '-v', help='Print all results even when below threshold or exceeding boundaries. '
                   'Note this can result in bogus values, as the TopDown methodology relies on thresholds '
                   'to correctly characterize workloads. Values not crossing threshold are marked with <.',
                   action='store_true')
    g = p.add_argument_group('Output format')
    g.add_argument('--per-core', help='Aggregate output per core', action='store_true')
    g.add_argument('--per-socket', help='Aggregate output per socket', action='store_true')
    g.add_argument('--per-thread', help='Aggregate output per CPU thread', action='store_true')
    g.add_argument('--global', help='Aggregate output for all CPUs', action='store_true', dest='global_')
    g.add_argument('--no-desc', help='Do not print event descriptions', action='store_true')
    g.add_argument('--desc', help='Force event descriptions', action='store_true')
    g.add_argument('--csv', '-x', help='Enable CSV mode with specified delimeter')
    g.add_argument('--output', '-o', help='Set output file')
    g.add_argument('--split-output', help='Generate multiple output files, one for each specified '
                   'aggregation option (with -o)',
                   action='store_true')
    g.add_argument('--graph', help='Automatically graph interval output with tl-barplot.py',
                   action='store_true')
    g.add_argument("--graph-cpu", help="CPU to graph using --graph")
    g.add_argument('--title', help='Set title of graph')
    g.add_argument('-q', '--quiet', help='Avoid unnecessary status output', action='store_true')
    g.add_argument('--long-desc', help='Print long descriptions instead of abbreviated ones.',
                   action='store_true')
    g.add_argument('--columns', help='Print CPU output in multiple columns for each node', action='store_true')
    g.add_argument('--json', help='Print output in JSON format for Chrome about://tracing', action='store_true')
    g.add_argument('--summary', help='Print summary at the end. Only useful with -I', action='store_true')
    g.add_argument('--no-area', help='Hide area column', action='store_true')
    g.add_argument('--perf-output', help='Save perf stat output in specified file')
    g.add_argument('--perf-summary', help='Save summarized perf stat output in specified file')
    g.add_argument('--no-perf', help=argparse.SUPPRESS, action='store_true') # noop, for compatibility
    g.add_argument('--perf', help='Print perf command line', action='store_true')
    g.add_argument('--print', help="Only print perf command line. Don't run", action='store_true')
    g.add_argument('--idle-threshold', help="Hide idle CPUs (default <5%% of busiest if not CSV, specify percent)",
                   default=None, type=float)
    g.add_argument('--no-output', help="Don't print computed output. Does not affect --summary.", action='store_true')
    g.add_argument('--no-mux', help="Don't print mux statistics", action="store_true")
    g.add_argument('--abbrev', help="Abbreviate node names in output", action="store_true")
    g.add_argument('--no-sort', help="Don't sort output by Metric group", action="store_true")
    g = p.add_argument_group('Environment')
    g.add_argument('--force-cpu', help='Force CPU type', choices=[x[0] for x in known_cpus])
    g.add_argument('--force-topology', metavar='findsysoutput', help='Use specified topology file (find /sys/devices)')
    g.add_argument('--force-cpuinfo', metavar='cpuinfo', help='Use specified cpuinfo file (/proc/cpuinfo)')
    g.add_argument('--force-hypervisor', help='Assume running under hypervisor (no uncore, no offcore, no PEBS)',
                   action='store_true')
    g.add_argument('--no-uncore', help='Disable uncore events', action='store_true')
    g.add_argument('--no-check', help='Do not check that PMU units exist', action='store_true')
    g = p.add_argument_group('Additional information')
    g.add_argument('--print-group', '-g', help='Print event group assignments',
                   action='store_true')
    g.add_argument('--raw', help="Print raw values", action='store_true')
    g.add_argument('--valcsv', '-V', help='Write raw counter values into CSV file')
    g.add_argument('--stats', help='Show statistics on what events counted', action='store_true')
    g = p.add_argument_group('xlsx output')
    g.add_argument('--xlsx', help='Generate xlsx spreadsheet output with data for '
                   'socket/global/thread/core/summary/raw views with 1s interval. '
                   'Add --single-thread to only get program output.')
    g.add_argument('--set-xlsx', help=argparse.SUPPRESS, action='store_true') # set arguments for xlsx only
    g.add_argument('--xnormalize', help='Add extra sheets with normalized data in xlsx files', action='store_true')
    g.add_argument('--xchart', help='Chart data in xlsx files', action='store_true')
    g.add_argument('--keep', help='Keep temporary files', action='store_true')
    g.add_argument('--xkeep', dest='keep', action='store_true', help=argparse.SUPPRESS)
    g = p.add_argument_group('Sampling')
    g.add_argument('--show-sample', help='Show command line to rerun workload with sampling', action='store_true')
    g.add_argument('--run-sample', help='Automatically rerun workload with sampling', action='store_true')
    g.add_argument('--sample-args', help='Extra arguments to pass to perf record for sampling. Use + to specify -',
                   default='-g')
    g.add_argument('--sample-repeat',
                   help='Repeat measurement and sampling N times. This interleaves counting and sampling. '
                   'Useful for background collection with -a sleep X.', type=int)
    g.add_argument('--sample-basename', help='Base name of sample perf.data files', default="perf.data")
    g.add_argument('-d', help=argparse.SUPPRESS, action='help') # prevent passing this to perf
    p.add_argument('--version', help=argparse.SUPPRESS, action='store_true')
    p.add_argument('--debug', help=argparse.SUPPRESS, action='store_true') # enable scheduler debugging
    p.add_argument('--dfilter', help=argparse.SUPPRESS, action='append')
    p.add_argument('--repl', action='store_true', help=argparse.SUPPRESS) # start python repl after initialization
    p.add_argument('--filterquals', help=argparse.SUPPRESS, action='store_true') # remove events not supported by perf
    p.add_argument('--setvar', help=argparse.SUPPRESS, action='append') # set env variable (for test suite iterating options)
    p.add_argument('--tune', nargs='+', help=argparse.SUPPRESS) # override global variables with python expression
    p.add_argument('--tune-model', nargs='+', help=argparse.SUPPRESS) # override global variables late with python expression
    p.add_argument('--force-bn', action='append', help=argparse.SUPPRESS) # force bottleneck for testing
    p.add_argument('--no-json-header', action='store_true', help=argparse.SUPPRESS) # no [ for json
    p.add_argument('--no-json-footer', action='store_true', help=argparse.SUPPRESS) # no ] for json
    p.add_argument('--no-csv-header', action='store_true', help=argparse.SUPPRESS) # no header/version for CSV
    p.add_argument('--no-csv-footer', action='store_true', help=argparse.SUPPRESS) # no version for CSV
    p.add_argument('--no-version', action='store_true', help="Don't print version")
    args, rest = p.parse_known_args()
    io_set_args(args)
    # apply --setvar NAME=VALUE environment overrides
    if args.setvar:
        for j in args.setvar:
            l = j.split("=")
            os.environ[l[0]] = l[1]
    return args, rest
def output_count():
    """Return how many aggregation output modes are enabled on the command line."""
    modes = (args.per_core, args.global_, args.per_thread, args.per_socket)
    return sum(modes)
def multi_output():
    """True when more than one aggregation output mode was requested."""
    n = output_count()
    return n > 1
def open_output_files(args):
    """Replace the --valcsv and --perf-output filename strings in args
    with writable (possibly compressed) file objects, exiting on failure."""
    def _open(fn, what):
        try:
            return flex_open_w(fn)
        except IOError as e:
            sys.exit("Cannot open %s %s: %s" % (what, fn, e))
    if args.valcsv:
        args.valcsv = _open(args.valcsv, "valcsv file")
    if args.perf_output:
        args.perf_output = _open(args.perf_output, "perf output file")
def init_xlsx(args):
    """Validate --xlsx option combinations and derive the intermediate
    CSV output file names from the .xlsx target name."""
    args.set_xlsx = True
    # options that would fight over the output files; checked in order
    conflicts = (
        (args.output, "-o / --output not allowed with --xlsx"),
        (args.valcsv, "--valcsv not allowed with --xlsx"),
        (args.perf_output, "--perf-output not allowed with --xlsx"),
        (args.csv, "-c / --csv not allowed with --xlsx"),
        (args.thread, "--thread not supported with --xlsx"),  # XXX
    )
    for bad, msg in conflicts:
        if bad:
            sys.exit(msg)
    if not args.xlsx.endswith(".xlsx"):
        sys.exit("--xlsx must end in .xlsx")
    xlsx_base = re.sub(r'\.xlsx$', '.csv', args.xlsx)
    args.valcsv = re.sub(r'\.csv$', '-valcsv.csv', xlsx_base)
    args.perf_output = re.sub(r'\.csv$', '-perf.csv', xlsx_base)
    args.output = xlsx_base
    if args.xchart:
        args.xnormalize = True
        args.verbose = True
# Record whether --per-socket / --per-core were given explicitly by the user;
# set_xlsx() forces both on, and update_args_cpu() later relaxes only the
# non-forced ones on single-socket / non-SMT systems.
forced_per_socket = False
forced_per_core = False
def set_xlsx(args):
    """Force the output options needed to later assemble an .xlsx workbook.

    Switches to interval CSV mode and (unless --single-thread) enables
    every aggregation level with split output, remembering in
    forced_per_socket/forced_per_core whether the user asked for those
    levels themselves.
    """
    if not args.interval:
        args.interval = 1000
    args.csv = ','
    if args.xlsx:
        args.summary = True
    if not args.single_thread:
        args.per_thread = True
        args.split_output = True
        global forced_per_socket
        if args.per_socket:
            forced_per_socket = True
        global forced_per_core
        if args.per_core:
            forced_per_core = True
        args.per_socket = True
        args.per_core = True
        args.no_aggr = True
        args.global_ = True
def do_xlsx(env, args):
    """Combine the per-level CSV output files into args.xlsx via tl-xlsx.py.

    With --xnormalize each CSV is first run through interval-normalize.py
    and the normalized sheets are added (and charted with --xchart).
    Returns the conversion's exit status; temporary CSVs are removed
    unless --keep is set.
    """
    cmd = "%s %s/tl-xlsx.py --valcsv '%s' --perf '%s' --cpuinfo '%s' " % (
        sys.executable,
        exe_dir(),
        args.valcsv.name,
        args.perf_output.name,
        env.cpuinfo if env.cpuinfo else "/proc/cpuinfo")
    if args.single_thread:
        names = ["program"]
        files = [args.output if args.output else "program"]
    else:
        names = ((["socket"] if args.per_socket else []) +
                 (["core"] if args.per_core else []) +
                 ["global", "thread"])
        files = [tl_output.output_name(args.output, p) for p in names]
    extrafiles = []
    extranames = []
    charts = []
    if args.xnormalize:
        for j, n in zip(files, names):
            nname = j.replace(".csv", "-norm.csv")
            ncmd = "%s %s/interval-normalize.py --normalize-cpu --error-exit < '%s' > '%s'" % (
                sys.executable,
                exe_dir(),
                j,
                nname)
            if not args.quiet:
                print(ncmd)
            ret = os.system(ncmd)
            if ret:
                warn("interval-normalize failed: %d" % ret)
                return ret
            extrafiles.append(nname)
            extranames.append("n" + n)
            if args.xchart:
                charts.append("n" + n)
    cmd += " ".join(["--%s '%s'" % (n, f) for n, f in zip(names, files)])
    cmd += " " + " ".join(["--add '%s' '%s'" % (f, n) for n, f in zip(extranames, extrafiles)])
    cmd += " " + " ".join(["--chart '%s'" % f for f in charts])
    cmd += " '%s'" % args.xlsx
    if not args.quiet:
        print(cmd)
    ret = os.system(cmd)
    if not args.keep:
        for fn in files + extrafiles:
            os.remove(fn)
    return ret
def gentmp(o, cpu):
    """Derive a per-cpu temporary file name from output name o,
    stripping any .xz/.gz compression suffix first."""
    base = re.sub(r'\.(xz|gz)', '', o)
    suffix = "-cpu%d" % cpu
    if base.endswith(".csv"):
        return base.replace(".csv", suffix + ".csv")
    return base + suffix
def output_to_tmp(arg, outfn, args):
    """Point the -o/--output option of command line arg at temp file outfn,
    inserting one when no output option was given."""
    if not args.output or args.xlsx:
        arg.insert(1, "-o" + outfn)
        return
    # try the two plain spellings; order matters for update_arg's side effect
    if update_arg(arg, "--output", "=", outfn) or update_arg(arg, "-o", "", outfn):
        return
    # does not handle -o combined with other one letter options
    sys.exit("Use plain -o / --output argument with --parallel")
def merge_files(files, outf, args):
    """Append each temporary file to outf, removing it unless --keep is set."""
    for fn in files:
        tl_output.catrmfile(fn, outf, args.keep)
# run multiple subset toplevs in parallel and merge the results
def run_parallel(args, env):
    """Split the --import file into --pjobs byte ranges, run one toplev
    subprocess per range with --subset, then concatenate/merge their
    output, valcsv, perf-output and summary files. Returns the exit
    status to pass to sys.exit().
    """
    procs = []
    pofns = []     # per-job --perf-output temp files
    valfns = []    # per-job --valcsv temp files
    sums = []      # per-job --perf-summary temp files
    targ = copy(sys.argv)
    del targ[targ.index("--parallel")]
    # byte size of each job's input chunk (float division; --subset
    # formats it with %d below, truncating)
    ichunk = os.path.getsize(args.import_) / args.pjobs
    fileoff = 0
    for cpu in range(args.pjobs):
        arg = copy(targ)
        if args.xlsx:
            del_arg_val(arg, "--xlsx")
            arg = [arg[0], "--set-xlsx", "--perf-output=X", "--valcsv=X"] + arg[1:]
        outfn = gentmp(args.output if args.output else "toplevo%d" % os.getpid(), cpu)
        output_to_tmp(arg, outfn, args)
        if args.perf_output or args.xlsx:
            pofn = gentmp(args.perf_output if args.perf_output else "toplevp", cpu)
            update_arg(arg, "--perf-output", "=", pofn)
            pofns.append(pofn)
        if args.valcsv or args.xlsx:
            valfn = gentmp(args.valcsv if args.valcsv else "toplevv", cpu)
            update_arg(arg, "--valcsv", "=", valfn)
            valfns.append(valfn)
        end = ""
        if cpu < args.pjobs-1:
            end = "%d" % (fileoff + ichunk)
        arg.insert(1, ("--subset=%d-" % fileoff) + end)
        fileoff += ichunk
        # only the first job prints the JSON/CSV header, only the last
        # (or the summary pass) prints the footer
        if args.json and args.pjobs > 1:
            if cpu > 0:
                arg.insert(1, "--no-json-header")
            if cpu < args.pjobs - 1 or args.summary:
                arg.insert(1, "--no-json-footer")
        sumfn = None
        if args.summary:
            del arg[arg.index("--summary")]
            sumfn = gentmp("toplevs%d" % os.getpid(), cpu)
            arg.insert(1, "--perf-summary=" + sumfn)
            sums.append(sumfn)
        if args.pjobs > 1:
            if cpu > 0:
                arg.insert(1, "--no-csv-header")
            if cpu < args.pjobs - 1 or args.summary:
                arg.insert(1, "--no-csv-footer")
        if not args.quiet:
            print(" ".join(arg))
        pp = subprocess.Popen(arg, stdout=subprocess.PIPE, **popentext) # type: ignore
        procs.append((pp, outfn))
    if args.xlsx:
        init_xlsx(args)
        set_xlsx(args)
    logfiles, logf = tl_output.open_all_logfiles(args, args.output)
    for p in procs:
        ret = p[0].wait()
        if ret:
            sys.exit("Subprocess toplev failed %d" % ret)
        tl_output.catrmoutput(p[1], logf, logfiles, args.keep)
    ret = 0
    if sums:
        # merge the per-job perf summaries and run one more toplev over
        # the merged stream to produce the SUMMARY output
        cmd = [sys.executable, exe_dir() + "/interval-merge.py"] + sums
        if not args.quiet:
            print(" ".join(cmd))
        inp = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        outfn = "toplevm%d" % os.getpid()
        output_to_tmp(targ, outfn, args)
        if args.xlsx:
            del_arg_val(targ, "--xlsx")
            targ.insert(1, "--set-xlsx")
        if args.perf_output:
            del_arg_val(targ, "--perf-output")
        if args.valcsv:
            del_arg_val(targ, "--valcsv")
        update_arg(targ, "--import", "=", "/dev/stdin")
        targ.insert(1, "--no-output")
        if args.json:
            targ.insert(1, "--no-json-header")
        else:
            targ.insert(1, "--no-csv-header")
        if not args.quiet:
            print(" ".join(targ))
        outp = subprocess.Popen(targ, stdin=inp.stdout)
        ret = inp.wait()
        if ret:
            sys.exit("interval-merge failed")
        ret = outp.wait()
        if ret:
            sys.exit("summary toplev failed")
        tl_output.catrmoutput(outfn, logf, logfiles, args.keep)
        if not args.keep:
            for j in sums:
                os.remove(j)
    open_output_files(args)
    merge_files(valfns, args.valcsv, args)
    merge_files(pofns, args.perf_output, args)
    if args.xlsx:
        ret = do_xlsx(env, args)
    # XXX graph
    return ret
def init_idle_threshold(args):
    """Pick the idle-hiding threshold as a fraction of the max cycle count."""
    if args.idle_threshold:
        return args.idle_threshold / 100.
    # machine-readable output must stay complete for downstream parsers
    if args.csv or args.xlsx or args.set_xlsx:  # not for args.graph
        return 0
    return 0.05
ret_latency = None
def setup_retlatency(args):
    """Load retirement-latency data from --ret-latency ("file[:qual...]")
    into the ret_latency global, or fall back to the built-in defaults."""
    global ret_latency
    if not args.ret_latency:
        load_default_retlat()
        return
    fn = args.ret_latency.split(":")[0]
    try:
        ret_latency = json.load(open(fn))["Data"]
    except IOError:
        sys.exit("Cannot open %s" % fn)
    except KeyError:
        sys.exit("retlat file has unparseable format")
def lookup_retlat(event):
    """Return the mean retirement latency for event from the loaded
    --ret-latency data, defaulting to 1.0 (with a warning) when no data
    or no entry is available.

    NOTE(review): the original body contained per-qualifier refinement
    code after an unconditional `return o["MEAN"]`; that code was
    unreachable and has been removed without changing behavior.
    """
    if ret_latency is None:
        warn_once("No --ret-latency for %s" % event)
        return 1.0
    try:
        return ret_latency[event]["MEAN"]
    except KeyError as e:
        warn_once("bad ret latency key %s" % e)
        return 1.0  # XXX
def gen_cpu_name(cpu):
    """Translate a short cpu name (e.g. "spr") into the event-map string
    ("GenuineIntel-6-MODEL[-STEPPING]") used for EVENTMAP/UNCORE, exiting
    for unknown or non-generatable names."""
    if cpu == "simple":
        c = event_download.get_cpustr()
        if not c.startswith("GenuineIntel"): # fix github runner
            c = "GenuineIntel-6-4E"
        return c
    if cpu == "sprmax":
        cpu = "spr"
    for j in known_cpus:
        if cpu == j[0]:
            # entries are (name, model-list); a tuple entry carries
            # (model, stepping)
            if len(j[1]) > 0 and isinstance(j[1][0], tuple):
                return "GenuineIntel-6-%02X-%d" % j[1][0]
            else:
                if len(j[1]) == 0:
                    sys.exit("Cannot generate cpu name for %s" % cpu)
                if j[1][0] in eventlist_alias:
                    return eventlist_alias[j[1][0]] # type: ignore
                return "GenuineIntel-6-%02X" % j[1][0] # type: ignore
    sys.exit("Unknown cpu %s" % cpu)
    return None
def update_args(args, env):
    """Apply the --force-* options and related argument side effects to
    args and the measurement environment env."""
    if args.force_cpu:
        env.forcecpu = args.force_cpu
        cpuname = gen_cpu_name(args.force_cpu)
        # only set when the user has not overridden them already
        for var in ("EVENTMAP", "UNCORE"):
            if not os.getenv(var):
                os.environ[var] = cpuname
    if args.force_topology:
        if not os.getenv("TOPOLOGY"):
            os.environ["TOPOLOGY"] = args.force_topology
        ocperf.topology = None # force reread
    if args.force_cpuinfo:
        env.cpuinfo = args.force_cpuinfo
    if args.force_hypervisor:
        env.hypervisor = True
    if args.sample_repeat:
        args.run_sample = True
    if args.handle_errata:
        args.ignore_errata = False
    if args.exclusive and args.pinned:
        sys.exit("--exclusive and --pinned cannot be combined")
def handle_parallel(args, env):
    """With --parallel, validate unsupported option combinations, default
    --pjobs to the CPU count, run run_parallel and exit with its status.
    No-op without --parallel."""
    if args.parallel:
        if not args.import_:
            sys.exit("--parallel requires --import")
        if args.import_.endswith(".xz") or args.import_.endswith(".gz"):
            sys.exit("Uncompress input file first") # XXX
        if args.perf_summary:
            sys.exit("--parallel does not support --perf-summary") # XXX
        if args.subset:
            # XXX support sample
            sys.exit("--parallel does not support --subset")
        if args.json and multi_output() and not args.split_output:
            sys.exit("--parallel does not support multi-output --json without --split-output")
        if args.graph:
            sys.exit("--parallel does not support --graph") # XXX
        if args.pjobs == 0:
            import multiprocessing
            args.pjobs = multiprocessing.cpu_count()
        sys.exit(run_parallel(args, env))
def handle_rest(args, rest):
    """Post-process the remaining (perf) arguments: drop a leading "--",
    forward --cpu/--pid to perf, validate --csv and apply xlsx setup.
    Returns the adjusted argument list."""
    if rest and rest[0] == "--":
        rest = rest[1:]
    # prepend in this order so --pid ends up first, matching historic behavior
    for flag, val in (("--cpu", args.cpu), ("--pid", args.pid)):
        if val:
            rest = [flag, val] + rest
    if args.csv and len(args.csv) != 1:
        sys.exit("--csv/-x argument can be only a single character")
    if args.xlsx:
        init_xlsx(args)
    if args.set_xlsx:
        set_xlsx(args)
    return rest
def update_args2(args):
    """Late argument fixups: open the --perf-summary file and expand the
    --all and --only-bottleneck convenience options."""
    if args.perf_summary:
        try:
            args.perf_summary = flex_open_w(args.perf_summary)
        except IOError as e:
            sys.exit("Cannot open perf summary file %s: %s" % (args.perf_summary, e))
        # XXX force no_uncore because the resulting file cannot be imported otherwise?
    if args.all:
        for opt in ("tsx", "power", "sw", "metrics", "frequency"):
            setattr(args, opt, True)
        args.level = 6
    if args.only_bottleneck:
        args.quiet = True
        args.no_version = True
def handle_graph(args):
    """With --graph, start tl-barplot.py as a subprocess and redirect
    toplev's CSV output into its stdin. Returns the Popen object (or
    None without --graph)."""
    graphp = None
    if args.graph:
        if not args.interval:
            args.interval = 100
        extra = ""
        if args.title:
            title = args.title
        else:
            title = "cpu %s" % (args.graph_cpu if args.graph_cpu else 0)
        extra += '--title "' + title + '" '
        if args.split_output:
            sys.exit("--split-output not allowed with --graph")
        if args.output:
            extra += '--output "' + args.output + '" '
        if args.graph_cpu:
            extra += "--cpu " + args.graph_cpu + " "
        # the plotter consumes CSV on stdin
        args.csv = ','
        cmd = "%s %s/tl-barplot.py %s /dev/stdin" % (sys.executable, exe_dir(), extra)
        if not args.quiet:
            print(cmd)
        graphp = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, **popentext) # type: ignore
        args.output = graphp.stdin
    return graphp
def init_ring_filter(args):
    """Map the --kernel/--user flags to a perf privilege-level modifier
    ('k', 'u', or '' when both or neither are requested)."""
    only_kernel = args.kernel and not args.user
    only_user = args.user and not args.kernel
    if only_kernel:
        return 'k'
    if only_user:
        return 'u'
    return ""
MAX_ERROR = 0.05
def check_ratio(l):
    """Plausibility check: ratio l should be within [0, 1] plus MAX_ERROR
    slack; always passes in verbose mode."""
    if args.verbose:
        return True
    low = 0 - MAX_ERROR
    high = 1 + MAX_ERROR
    return low < l < high
def update_args_cpu(args, pversion):
    """Adjust arguments and feature flags based on the detected CPU and
    the detected perf version capabilities."""
    if args.level < 0:
        if args.bottlenecks:
            args.level = 4
        else:
            # default to level 2 only when enough generic counters exist
            args.level = 2 if any([x >= 8 for x in cpu.counters.values()]) else 1
    if args.show_cpu:
        print("%s %s %s" % (cpu.true_name, cpu.pmu_name, cpu.name))
        sys.exit(0)
    desired_cpu = args.force_cpu if args.force_cpu else cpu.model
    if desired_cpu in eventlist_alias:
        r = eventlist_alias[desired_cpu]
        if not os.getenv("EVENTMAP"):
            os.environ["EVENTMAP"] = r
        if not os.getenv("UNCORE"):
            os.environ["UNCORE"] = r
    if cpu.pmu_name and cpu.pmu_name.startswith("generic") and not args.quiet:
        print("warning: kernel is in architectural mode and might mismeasure events", file=sys.stderr)
        print("Consider a kernel update. See https://github.com/andikleen/pmu-tools/wiki/toplev-kernel-support", file=sys.stderr)
        if cpu.cpu in hybrid_cpus:
            sys.exit("Hybrid %s not supported in architectural mode" % cpu.cpu)
    # relax the xlsx-forced aggregation levels on small systems, unless
    # the user requested them explicitly
    if args.xlsx and not forced_per_socket and cpu.sockets == 1:
        args.per_socket = False
    if args.xlsx and not forced_per_core and cpu.threads == 1:
        args.per_core = False
    if cpu.hypervisor:
        feat.max_precise = 0
        feat.has_max_precise = True
    if not pversion.has_uncore_expansion:
        # XXX reenable power
        args.no_uncore = True
    if cpu.hypervisor or args.no_uncore:
        feat.supports_power = False
def print_perf(r):
    """With --perf / --print, echo the generated perf command line
    (quoting event groups and stripping the internal --log-fd option)."""
    if not (args.perf or args.print):
        return
    quoted = [("'" + x + "'") if "{" in x else x for x in r]
    quoted = [x.replace(";", "\\;") for x in quoted]
    fd = quoted.index('--log-fd')
    del quoted[fd:fd + 2]
    cmd = " ".join(quoted)
    if len(quoted) > 2:
        cmd = cmd.replace("},", "},\n")
    print(cmd)
    sys.stdout.flush()
def gen_script(r):
    """Print a standalone shell script that records the perf data for this
    toplev invocation (for --gen-script), rewriting the perf argument
    list r in place to redirect output into files."""
    print("#!/bin/sh")
    print("# Generated from 'toplev " + " ".join(sys.argv) + " for CPU " + cpu.cpu)
    print("# Show output with toplev.py " +
          " ".join([x for x in sys.argv if x != "--gen-script"]) +
          " --import toplev_perf%s --force-cpuinfo toplev_cpuinfo --force-topology toplev_topology --force-cpu " % (
        ".data" if args.script_record else ".csv") + cpu.cpu)
    print("# print until Ctrl-C or run with command on command line (e.g. -a -I 1000 sleep 10)")
    print("# override output file names with OUT=... script (default toplev_...)")
    if not args.script_record:
        print("# enable compression with POSTFIX=.xz script")
    print("OUT=${OUT:-toplev}")
    print("PERF=${PERF:-perf}")
    print("find /sys/devices > ${OUT}_topology")
    print("cat /proc/cpuinfo > ${OUT}_cpuinfo")
    r[0] = "$PERF"
    # replace the internal --log-fd redirection with a -o output file
    i = r.index('--log-fd')
    r[i] = "-o"
    if args.script_record:
        r[i + 1] = "${OUT}_perf.data"
        i = r.index("stat")
        r[i] = "stat record --quiet"
    else:
        r[i + 1] = "${OUT}_perf.csv${POSTFIX}"
    i = r.index('-x;')
    if args.script_record:
        del r[i]
    else:
        r[i] = '-x\\;'
    # shell-quote the event list
    i = r.index('-e')
    r[i+1] = "'" + r[i+1] + "'"
    print(" ".join(r + ['"$@"']))
class PerfRun(object):
    """Control a perf subprocess (or an imported result file).

    Also implements the --subset input windowing: seeking to a byte
    range or percentage chunk of an imported file, and probabilistic
    sampling of measurement intervals.
    """
    def __init__(self):
        # when True, input lines are skipped until the next timestamp
        self.skip_to_next_ts = False
        # stop reading once the input offset passes this byte position
        self.end_seek_offset = None
        # probability of keeping an interval for "sample:P%" subsets
        self.sample_prob = None
        # drop the first (likely partial) line after seeking into a file
        self.skip_line = False
        self.perf = None
        self.offset = None
    def handle_inputsubset(self, f, iss):
        """Apply a --subset specification iss ("start[-end]",
        "nth/PCT%[-]" or "sample:PCT%") to the open input file f."""
        m = re.match(r'(\d+)-?(\d+)?$', iss)
        if m:
            off = int(m.group(1))
            f.seek(off)
            if m.group(2):
                self.end_seek_offset = int(m.group(2)) + 1
            if off:
                self.skip_to_next_ts = True
                self.skip_line = True
            return
        m = re.match(r'(\d+)/([0-9.]+)%(-)?$', iss)
        if m:
            f.seek(0, 2)
            size = f.tell()
            chunk = int(size * (float(m.group(2)) / 100.))
            nth = int(m.group(1))
            if (nth+1)*chunk > size:
                sys.exit("--subset %s out of range" % iss)
            f.seek(chunk * nth)
            # trailing "-" means read until end of file
            if m.group(3) is None:
                self.end_seek_offset = chunk * (1+nth) + 1
            if chunk * nth != 0:
                self.skip_to_next_ts = True
                self.skip_line = True
            return
        m = re.match('sample:([0-9.]+)%?$', iss)
        if m:
            self.sample_prob = float(m.group(1)) / 100.
            self.random = random.Random()
            s = os.getenv("TLSEED")
            if s:
                self.random.seed(int(s))
            self.sampling = False
            return
        sys.exit("Unparseable --subset %s" % iss)
    def skip_input(self):
        """True when the current input line should be ignored."""
        if self.skip_to_next_ts:
            return True
        if self.sample_prob:
            return not self.sampling
        return False
    # must be stored before reading the line
    def store_offset(self):
        """Remember the file position so next_timestamp can detect the
        end of the requested byte range."""
        if self.end_seek_offset:
            self.offset = self.inputf.tell() # type: ignore
    def skip_first_line(self):
        """Consume the skip-one-line flag set after seeking mid-file."""
        if self.skip_line:
            self.skip_line = False
            return True
        return False
    def next_timestamp(self):
        """Called at each new input timestamp; returns True when the
        configured byte range is exhausted. Also re-rolls the sampling
        decision for "sample:" subsets."""
        if self.end_seek_offset:
            if self.end_seek_offset <= self.offset:
                return True
        self.skip_to_next_ts = False
        if self.sample_prob:
            r = self.random.random()
            self.sampling = r < self.sample_prob
        return False
    def execute(self, r):
        """Run perf with argument list r (or open the --import input) and
        return a readable stream of perf stat output."""
        if args.import_:
            print_perf(r)
            if args.script_record:
                self.perf = subprocess.Popen([feat.perf, "stat", "report", "-x;", "-i", args.import_], stderr=subprocess.PIPE, **popentext) # type: ignore
                return self.perf.stderr
            self.perf = None
            try:
                f = flex_open_r(args.import_)
            except IOError as e:
                sys.exit("Cannot open file %s: %s" % (args.import_, e))
            if args.subset:
                try:
                    self.handle_inputsubset(f, args.subset)
                except OSError:
                    sys.exit("--subset not supported on compressed or unseekable files.")
                except io.UnsupportedOperation:
                    sys.exit("--subset not supported on compressed or unseekable files.")
            self.inputf = f
            return f
        if args.gen_script:
            gen_script(r)
            sys.exit(0)
        # use a pty so perf's --log-fd output is line buffered
        outp, inp = pty.openpty()
        if 'set_inheritable' in os.__dict__:
            os.set_inheritable(inp, True)
        n = r.index("--log-fd")
        r[n + 1] = "%d" % (inp)
        print_perf(r)
        if args.print:
            sys.exit(0)
        self.perf = subprocess.Popen(r, close_fds=False) # type: ignore
        os.close(inp)
        return os.fdopen(outp, 'r')
    def wait(self):
        """Wait for the perf subprocess (if any) and return its status."""
        ret = 0
        if self.perf:
            ret = self.perf.wait()
            self.perf = None
        return ret
    def __del__(self):
        if self.perf:
            self.perf.kill()
def separator(x):
    """Return the ':' needed before a modifier, or '' when event x
    already has a qualifier or is a raw pmu event."""
    has_qual = ":" in x or x.startswith("cpu")
    return "" if has_qual else ":"
def add_filter_event(e):
    """Append the global ring filter (u/k) to event e, unless e is a
    non-core raw event or a pseudo event, or already carries the filter."""
    if "/" in e and not e.startswith("cpu"):
        return e
    if e in ("dummy", "emulation-faults", "duration_time"):
        return e
    suffix = separator(e) + args.ring_filter
    return e if e.endswith(suffix) else e + suffix
def add_filter(s):
    """Apply the configured ring filter to every event in list s."""
    if args.ring_filter:
        return [add_filter_event(x) for x in s]
    return s
def is_cpu_event(s):
    """True if s is a core-pmu (cpu / cpu_core / cpu_atom) raw event string."""
    return bool(re.match(r'cpu(_atom|_core)?/', s))
def initialize_event(name, i, e):
    """Register the scheduling constraints of event name (raw form i,
    event-map object e) in the global ectx scheduler state and in the
    valid_events pattern list."""
    if "." in name or "_" in name and name not in non_json_events:
        eo = e.output(noname=not INAME, noexplode=True)
        ectx.emap.update_event(eo, e)
        ectx.emap.update_event(remove_qual(eo), e) # XXX
        # events restricted to specific or fixed counters
        if (e.counter not in ectx.standard_counters and not name.startswith("UNC_")):
            if e.counter.startswith("Fixed"):
                ectx.limited_counters[i] = int(e.counter.split()[2]) + FIXED_BASE
                ectx.fixed_events.add(i)
            elif e.counter.isdecimal() and int(e.counter) >= 32:
                ectx.limited_counters[i] = int(e.counter) - 32 + FIXED_BASE
                ectx.fixed_events.add(i)
            else:
                # for now use the first counter only to simplify
                # the assignment. This is sufficient for current
                # CPUs
                ectx.limited_counters[i] = int(e.counter.split(",")[0])
            ectx.limited_set.add(i)
        nameu = e.name.upper()
        if nameu in ectx.constraint_fixes:
            e.counter = ectx.constraint_fixes[nameu]
        for k, v in ectx.constraint_patterns:
            if nameu.startswith(k):
                e.counter = v
        if e.counter == ectx.limit4_counters or eo.endswith("/p"):
            ectx.limit4_events.add(i)
        if e.errata and e.errata != "0" and e.errata != "null":
            if e.errata not in ectx.errata_whitelist:
                ectx.errata_events[name] = e.errata
            else:
                ectx.errata_warn_events[name] = e.errata
        if ('pebs' in e.__dict__ and e.pebs == 2) or name.startswith("FRONTEND_"):
            ectx.require_pebs_events.add(name)
    else:
        non_json_events.add(i)
    valid_events.add_event(i)
    if INAME:
        valid_events.add_event(ocperf.gen_name(re.sub(r':.*','', name), False)) # XXX sup, handle :... uniquely
    if not is_cpu_event(i) and i not in ectx.fixed_events:
        if not i.startswith("uncore"):
            valid_events.add_event(i)
        if i.startswith("msr/"):
            ectx.sched_ignore_events.add(i)
        else:
            ectx.outgroup_events.add(add_filter_event(i))
def raw_event(i, name="", period=False, initialize=False):
    """Convert symbolic event i to the raw perf event syntax, returning
    "dummy" for events that cannot be resolved or are unsupported in the
    current environment. With initialize=True also registers scheduling
    constraints via initialize_event()."""
    e = None
    orig_i = i
    if i == "cycles" and (cpu.cpu in hybrid_cpus or cached_exists("/sys/devices/cpu_core")):
        i = "cpu_clk_unhalted.thread"
    if "." in i or "_" in i and i not in non_json_events:
        if not cpu.ht:
            i = i.replace(":percore", "")
        extramsg = [] # type: List[str]
        e = ectx.emap.getevent(i, nocheck=event_nocheck(), extramsg=extramsg)
        if e is None:
            # warn only once per unknown event
            if i not in ectx.notfound_cache:
                ectx.notfound_cache[i] = extramsg[0]
            if not args.quiet:
                print("%s %s" % (i, extramsg[0]), file=sys.stderr)
            return "dummy"
        if has(e, 'perfqual') and not cached_exists("/sys/devices/%s/format/%s" % (ectx.emap.pmu, e.perfqual)):
            if not args.quiet:
                print("%s event not supported in hypervisor or architectural mode" % i, file=sys.stderr)
            return "dummy"
        # perf does not accept event names starting with a digit
        if re.match("^[0-9]", name):
            name = "T" + name
        if args.filterquals:
            e.filter_qual()
        i = e.output(noname=not INAME, name=name, period=period, noexplode=True)
        if not ectx.force_metrics:
            m = re.search(r'(topdown-[a-z-]+)', i)
            if m and not cached_exists("/sys/devices/%s/events/%s" % (ectx.emap.pmu, m.group(1))):
                if not args.quiet:
                    print("%s event not supported in sysfs" % m.group(1))
                i = "dummy"
        if initialize:
            initialize_event(orig_i, i, e)
    return i
# generate list of converted raw events from events string
def raw_events(evlist, initialize=False):
    """Convert every symbolic event in evlist to raw perf syntax."""
    converted = []
    for ev in evlist:
        converted.append(raw_event(ev, initialize=initialize))
    return converted
def mark_fixed(s):
    """Mark event s with an [F] suffix when it maps to a fixed counter."""
    if raw_event(s) in ectx.fixed_events:
        return "%s[F]" % s
    return s
def pwrap(s, linelen=70, indent=""):
    """Word-wrap s to linelen columns and print it to stderr, prefixing
    every line with indent."""
    wrapped = textwrap.wrap(s, linelen, break_long_words=False)
    print(indent + ("\n" + indent).join(wrapped), file=sys.stderr)
def pwrap_not_quiet(s, linelen=70, indent=""):
    """Like pwrap, but suppressed by --quiet."""
    if args.quiet:
        return
    pwrap(s, linelen, indent)
def perf_args(evstr, rest):
    """Build the perf stat command line measuring event string evstr;
    the --log-fd placeholder "X" is patched in later by PerfRun.execute."""
    cmd = [feat.perf, "stat", "-x;", "--log-fd", "X"]
    if args.interval:
        cmd += ['-I', str(args.interval)]
    cmd += ["-e", evstr]
    return cmd + rest
def setup_perf(evstr, rest):
    """Start perf (or open the import file) for event string evstr and
    return (input stream, PerfRun controller)."""
    run = PerfRun()
    stream = run.execute(perf_args(evstr, rest))
    return stream, run
class Stat(object):
    """Track how many measurements were taken and which error kinds occurred."""
    def __init__(self):
        # total number of measurements seen
        self.total = 0
        # error description -> number of occurrences
        self.errors = Counter() # type: typing.Counter[str]
def print_not(a, count, msg, j):
    """Report to stderr how often (as a percentage of a.total
    measurements) event j was affected by error msg."""
    pct = 100.0 * (float(count) / float(a.total))
    print(("%s %s %s %.2f%% in %d measurements"
           % (j, j, msg, # XXX rmap again with ectx
              pct, a.total)),
          file=sys.stderr)
# XXX need to get real ratios from perf
def print_account(ad):
    """Print per-event error statistics (with --stats) and a one-line
    summary of how many events hit each error kind."""
    total = Counter() # type: typing.Counter[str]
    for key, a in ad.items():
        for err, cnt in a.errors.items():
            if args.stats:
                print_not(a, cnt, err, key)
            total[err] += 1
    if sum(total.values()) > 0 and not args.quiet:
        print(", ".join(["%d events %s" % (num, e) for e, num in total.items()]),
              file=sys.stderr)
class ValidEvents(object):
    """Registry of regular expressions describing event names that may
    appear in perf output lines (see is_event)."""
    def __init__(self):
        # generic raw-event patterns; more specific names are inserted at
        # the front by add_event so they win over the catch-alls
        self.valid_events = [r"cpu(_core|_atom)?/.*?/", "uncore.*?/.*?/", "ref-cycles", "power.*",
                             r"msr.*", "emulation-faults",
                             r"r[0-9a-fA-F]+", "cycles", "instructions", "dummy",
                             "slots", r"topdown-(fe-bound|be-bound|retiring|bad-spec|heavy-ops|br-mispredict|fetch-lat|mem-bound)"]
        self.update()
    def update(self):
        """Recompute the combined alternation pattern string."""
        self.string = "|".join(self.valid_events)
    def add_event(self, ev):
        """Register event ev unless it already matches a known pattern."""
        if re.match(self.string, ev):
            return
        self.valid_events.insert(0, ev)
        self.update()
valid_events = ValidEvents()
def is_event(l, n):
    """True-ish if field n of the split perf output line l looks like an
    event name (returns the match object or None/False)."""
    if n >= len(l):
        return False
    # matching against the single combined string keeps re's cache effective
    return re.match(valid_events.string, l[n], re.I)
def set_interval(env, d, interval):
    """Publish the measurement interval duration d (seconds) and the
    interval number to the model environment env."""
    env.update({'interval-ns': d * 1e9,
                'interval-ms': d * 1e3,
                'interval-s': d,
                'interval': interval})
def key_to_coreid(k):
    """Map a cpu-number output key to a unique socket*1000+core id
    (0 for non-numeric keys)."""
    try:
        pair = cpu.cputocore[int(k)]
    except ValueError:
        return 0
    return pair[0] * 1000 + pair[1]
def key_to_socketid(k):
    """Map a cpu-number output key to its socket id (0 for non-numeric keys)."""
    try:
        n = int(k)
    except ValueError:
        return 0
    return cpu.cputocore[n][0]
def core_fmt(core):
    """Format a combined socket*1000+core id as "S<s>-C<c>", or just
    "C<c>" on single-socket systems.

    Fix: the original used `core / 1000` (true division since Python 3)
    and relied on %d truncating the float; use integer divmod instead.
    """
    socket, corenum = divmod(core, 1000)
    if cpu.sockets > 1:
        return "S%d-C%d" % (socket, corenum)
    return "C%d" % (corenum,)
def socket_fmt(j):
    """Format the socket of cpu j as "S<n>"."""
    sock = cpu.cputocore[j][0]
    return "S%d" % sock
def thread_fmt(j):
    """Format cpu j as its core name plus an SMT thread suffix "-T<n>"."""
    thread = cpu.cputothread[int(j)]
    return core_fmt(key_to_coreid(j)) + ("-T%d" % thread)
def display_core(cpunum, ignore_thread=False):
    """True when cpu cpunum matches one of the --core specifications
    (comma-separated "S<n>-C<n>-T<n>" patterns, each part optional but
    at least socket or core required)."""
    for match in args.core.split(","):
        m = re.match(r'(?P<socket>S\d+)?-?(?P<core>C\d+)?-?(?P<thread>T\d+)?', match, re.I)
        if not m or not any((m.group('core'), m.group('socket'),)):
            sys.exit("Bad core match %s" % match)
        def matching(name, mapping):
            # compare the numeric part of the matched group against the
            # topology mapping for this cpu
            return mapping[cpunum] == int(m.group(name)[1:]) # type: ignore
        if m.group('socket') and not matching('socket', cpu.cputosocket):
            continue
        if m.group('core') and cpu.cputocore[cpunum][1] != int(m.group('core')[1:]):
            continue
        if not ignore_thread and m.group('thread') and not matching('thread', cpu.cputothread):
            continue
        return True
    return False
# Aggregation modes for print_keys / display_keys:
OUTPUT_CORE_THREAD = 0  # mixed: SMT-aware nodes per core, others per thread
OUTPUT_CORE = 1         # aggregate per core
OUTPUT_THREAD = 2       # per SMT thread
OUTPUT_SOCKET = 3       # aggregate per socket
OUTPUT_GLOBAL = 4       # aggregate over the whole system
def display_keys(runner, keys, mode, post=""):
    """Compute the list of column/row labels the output layer should
    reserve for result keys in the given aggregation mode, with post
    appended as a pmu suffix."""
    allowed_threads = runner.cpu_list
    if mode == OUTPUT_GLOBAL:
        return ("",)
    if len(keys) > 1 and smt_mode:
        if mode == OUTPUT_SOCKET:
            all_cpus = dedup(map(socket_fmt, allowed_threads))
        else:
            cores = [key_to_coreid(x) for x in keys if int(x) in allowed_threads]
            if mode != OUTPUT_CORE:
                threads = [thread_fmt(x) + post for x in allowed_threads]
            else:
                threads = []
            all_cpus = [core_fmt(x)+post for x in cores] + threads
    else:
        all_cpus = [x + post for x in keys]
    # package-scope nodes get their own per-socket labels
    if any(map(package_node, runner.olist)):
        all_cpus += ["S%d" % x for x in range(cpu.sockets)]
    return all_cpus
def verify_rev(rev, cpus):
    """Assert that every cpu in cpus has an identical event list in rev."""
    reference = rev[cpus[0]]
    for k in cpus:
        for ind, ev in enumerate(rev[k]):
            assert ev == reference[ind]
        assert len(rev[k]) == len(reference)
def is_cycles(ev):
    """True if ev is a cycles/slots-like event usable for idle detection."""
    pattern = r'cycles|slots|r20003c|cpu(_atom|_core)?/slots|cpu(_atom|_core)?/event=0x(3c|a4),umask=0x(0|1)'
    return re.match(pattern, ev) is not None
def find_cycles(rev):
    """Return the first cycles-like event name found in rev, or ""."""
    for events in rev.values():
        for ev in events:
            if is_cycles(ev):
                return ev
    return ""
def find_idle_keys(res, rev, idle_thresh):
    """Return the set of result keys whose cycle count is below
    idle_thresh times the busiest key's count (candidates for hiding),
    or an empty set when idle detection is not possible."""
    if sum([len(res[k]) for k in res.keys()]) == 0:
        return set()
    if len(res.keys()) == 1:
        return set()
    idle_ev = find_cycles(rev)
    if idle_ev == "":
        warn_once("no idle detection because no cycle event found")
        return set()
    # per-key maximum value of the cycles event (0 when absent)
    cycles = { k: max([0] + [val for val, ev in zip(res[k], rev[k]) if ev == idle_ev])
               for k in res.keys() }
    if sum(cycles.values()) == 0 and not args.quiet:
        print_once("no idle detection because cycles counts are zero")
        return set()
    max_cycles = max(cycles.values())
    return {k for k in cycles.keys() if cycles[k] < max_cycles * idle_thresh}
def is_idle(cpus, idle_keys):
    """True when every cpu number in cpus appears (as string) in idle_keys."""
    return all(("%d" % c) in idle_keys for c in cpus)
def idle_core(core, idle_keys):
    """True when every SMT thread of core is in the idle set."""
    return is_idle(cpu.coreids[core], idle_keys)
def idle_socket(socket, idle_keys):
    """True when every cpu of socket is in the idle set."""
    return is_idle(cpu.sockettocpus[socket], idle_keys)
# from https://stackoverflow.com/questions/4836710/does-python-have-a-built-in-function-for-string-natural-sort
def num_key(s):
    """Natural-sort key: split s into alternating text and integer chunks."""
    chunks = re.split(r'(\d+)', s)
    return [int(c) if c.isdigit() else c for c in chunks]
def invalid_res(res, key, nothing):
    """When res is empty, record key (or each key of a list) in the
    `nothing` set and return True; otherwise return False."""
    if res:
        return False
    if isinstance(key, list):
        nothing.update(key)
    else:
        nothing.add(key)
    return True
def runner_name(r):
    """Short pmu name for runner r ("cpu" for the default pmu)."""
    pmu = r.pmu
    return "cpu" if pmu is None else pmu.replace("cpu_", "")
# default number of compute passes used to converge mutually dependent
# thresholds in print_keys
default_compute_iter = 3
# override how often to recompute to converge all the thresholds
COMPUTE_ITER = None
def print_keys(runner, res, rev, valstats, out, interval, env, mode, runner_list):
    """Compute node values/thresholds and print one interval of results.

    res/rev/valstats map output keys (cpu numbers or "") to value lists,
    event lists and statistics. mode selects the aggregation (OUTPUT_*).
    Keys below the idle threshold are hidden and remembered in
    runner.idle_keys.
    """
    nothing = set() # type: Set[str]
    allowed_threads = runner.cpu_list
    def filtered(j):
        # keys outside the measured cpu list are skipped entirely
        return j != "" and j.isdecimal() and int(j) not in allowed_threads
    core_node = lambda obj: safe_ref(obj, 'domain') in runner.ectx.core_domains
    thread_node = lambda obj: not (core_node(obj) or package_node(obj))
    idle_keys = find_idle_keys(res, rev, runner.idle_threshold)
    idle_mark_keys = find_idle_keys(res, rev, IDLE_MARKER_THRESHOLD)
    printer = runner.printer
    hidden_keys = set()
    stat = runner.stat
    keys = sorted(res.keys(), key=num_key)
    post = ""
    if runner.pmu != "cpu":
        if len(res.keys()) > 1:
            post += "-"
        post += runner_name(runner)
    out.set_cpus(display_keys(runner, keys, mode, post))
    runner.printer.numprint = 0
    if smt_mode:
        printed_cores = set()
        printed_sockets = set()
        # drop input cpus missing from the topology to avoid KeyErrors below
        for j in list(keys):
            if j != "" and int(j) not in cpu.cputocore:
                warn_once("Warning: input cpu %s not in cpuinfo." % j)
                del res[j]
        keys = sorted(res.keys(), key=num_key)
        for j in keys:
            if filtered(j):
                continue
            sid = key_to_socketid(j)
            core = key_to_coreid(j)
            if mode == OUTPUT_CORE and core in printed_cores:
                continue
            if mode == OUTPUT_SOCKET and sid in printed_sockets:
                continue
            if j in idle_keys:
                hidden_keys.add(j)
                continue
            runner.reset_thresh()
            # set of sibling cpus whose results get merged for this key
            if mode == OUTPUT_GLOBAL:
                cpus = keys
            elif mode == OUTPUT_SOCKET:
                cpus = [x for x in keys if key_to_socketid(x) == sid and not filtered(x)]
            else:
                cpus = [x for x in keys if key_to_coreid(x) == core and not filtered(x)]
            combined_res = list(zip(*[res[x] for x in cpus]))
            combined_st = [combine_valstat(z)
                           for z in zip(*[valstats[x] for x in cpus])]
            env['num_merged'] = len(cpus)
            if mode in (OUTPUT_CORE,OUTPUT_SOCKET,OUTPUT_GLOBAL):
                merged_res = combined_res
                merged_st = combined_st
            else:
                merged_res = res[j]
                merged_st = valstats[j]
            if invalid_res(merged_res, cpus, nothing):
                continue
            # may need to repeat to get stable threshold values
            # in case of mutual dependencies between SMT and non SMT
            # but don't loop forever (?)
            used_stat = stat
            onemore = False
            iterations = COMPUTE_ITER if COMPUTE_ITER else default_compute_iter
            for _ in range(iterations):
                env['num_merged'] = 1
                changed = runner.compute(merged_res, rev[j], merged_st, env, thread_node, used_stat, runner_list)
                verify_rev(rev, cpus)
                env['num_merged'] = len(cpus)
                changed += runner.compute(combined_res, rev[cpus[0]], combined_st, env, core_node, used_stat, runner_list)
                if changed == 0 and COMPUTE_ITER is None:
                    # do always one more so that any thresholds depending on a later node are caught
                    if not onemore:
                        onemore = True
                        continue
                    break
                used_stat = None
            # find bottleneck
            bn = find_bn(runner.olist, not_package_node)
            if mode == OUTPUT_GLOBAL:
                printer.print_res(runner.olist, out, interval, "", not_package_node, bn)
                break
            if mode == OUTPUT_SOCKET:
                printer.print_res(runner.olist, out, interval, socket_fmt(int(j)),
                                  not_package_node, bn, idle_socket(sid, idle_mark_keys))
                printed_sockets.add(sid)
                continue
            if mode == OUTPUT_THREAD:
                runner.compute(res[j], rev[j], valstats[j], env, package_node, stat, runner_list)
                printer.print_res(runner.olist, out, interval, thread_fmt(int(j))+post, any_node,
                                  bn, j in idle_mark_keys)
                continue
            # per core or mixed core/thread mode
            # print the SMT aware nodes
            if core not in printed_cores:
                printer.print_res(runner.olist, out, interval, core_fmt(core)+post, core_node, bn,
                                  idle_core(core, idle_mark_keys))
                printed_cores.add(core)
            # print the non SMT nodes
            if mode == OUTPUT_CORE:
                fmt = core_fmt(core)
                idle = idle_core(core, idle_mark_keys)
            else:
                fmt = thread_fmt(int(j))
                idle = j in idle_mark_keys
            printer.print_res(runner.olist, out, interval, fmt+post, thread_node, bn, idle)
    elif mode != OUTPUT_GLOBAL:
        # no SMT handling needed: one plain computation per key
        env['num_merged'] = 1
        for j in keys:
            if filtered(j):
                continue
            if j in idle_keys:
                hidden_keys.add(j)
                continue
            if invalid_res(res[j], j, nothing):
                continue
            runner.reset_thresh()
            runner.compute(res[j], rev[j], valstats[j], env, not_package_node, stat, runner_list)
            bn = find_bn(runner.olist, not_package_node)
            printer.print_res(runner.olist, out, interval, j+post, not_package_node, bn, j in idle_mark_keys)
    if mode == OUTPUT_GLOBAL:
        # sum all keys into one system-wide result
        env['num_merged'] = 1
        cpus = [x for x in keys if not filtered(x)]
        if cpus:
            combined_res = [sum([res[j][i] for j in cpus])
                            for i in range(len(res[cpus[0]]))]
            combined_st = [combine_valstat([valstats[j][i] for j in cpus])
                           for i in range(len(valstats[cpus[0]]))]
            if smt_mode:
                nodeselect = package_node
            else:
                nodeselect = any_node
            if not invalid_res(combined_res, cpus, nothing):
                runner.reset_thresh()
                runner.compute(combined_res, rev[cpus[0]] if len(cpus) > 0 else [],
                               combined_st, env, nodeselect, stat, runner_list)
                bn = find_bn(runner.olist, lambda x: True)
                printer.print_res(runner.olist, out, interval, "", nodeselect, bn, False)
    elif mode != OUTPUT_THREAD:
        # print package-scope nodes once per socket
        packages = set()
        for j in keys:
            if j == "":
                continue
            if j.isdecimal():
                if filtered(j):
                    continue
                p_id = cpu.cputosocket[int(j)]
                if p_id in packages:
                    continue
                packages.add(p_id)
                jname = "S%d" % p_id
            else:
                jname = j
            if j in idle_keys:
                hidden_keys.add(j)
                continue
            runner.reset_thresh()
            if invalid_res(res[j], j, nothing):
                continue
            runner.compute(res[j], rev[j], valstats[j], env, package_node, stat, runner_list)
            printer.print_res(runner.olist, out, interval, jname, package_node, None, j in idle_mark_keys)
            # no bottlenecks from package nodes for now
    out.flush()
    if not FUZZYINPUT:
        stat.referenced_check(res, runner.sched.evnum)
        stat.compute_errors()
    runner.idle_keys |= hidden_keys
    if nothing and not args.quiet:
        print("%s: Nothing measured%s" % (runner.pmu, " for " if len(nothing) > 0 and "" not in nothing else ""), " ".join(sorted(nothing)), file=sys.stderr)
    if runner.printer.numprint == 0 and not args.quiet and runner.olist:
        print("No node %scrossed threshold" % (
            "for %s " % runner_name(runner) if runner.pmu != "cpu" else ""), file=sys.stderr)
def print_and_split_keys(runner, res, rev, valstats, out, interval, env, rlist):
    """Print results once per requested aggregation mode.

    With multiple output modes selected each of --per-thread/--per-core/
    --per-socket/--global gets its own labeled output section; otherwise
    a single combined mode is derived from the flags."""
    if multi_output():
        if args.per_thread:
            out.remark("Per thread")
            out.reset("thread")
            print_keys(runner, res, rev, valstats, out, interval, env, OUTPUT_THREAD, rlist)
        if args.per_core:
            out.remark("Per core")
            out.reset("core")
            print_keys(runner, res, rev, valstats, out, interval, env, OUTPUT_CORE, rlist)
        if args.per_socket:
            out.remark("Per socket")
            out.reset("socket")
            print_keys(runner, res, rev, valstats, out, interval, env, OUTPUT_SOCKET, rlist)
        if args.global_:
            out.remark("Global")
            out.reset("global")
            print_keys(runner, res, rev, valstats, out, interval, env, OUTPUT_GLOBAL, rlist)
    else:
        if args.split_output:
            sys.exit("--split-output needs --per-thread / --global / --per-socket / --per-core")
        # pick the single output mode from the aggregation flags
        mode = OUTPUT_CORE_THREAD
        if args.per_thread:
            mode = OUTPUT_THREAD
        elif args.per_core:
            mode = OUTPUT_CORE
        elif args.per_socket:
            mode = OUTPUT_SOCKET
        elif args.global_:
            mode = OUTPUT_GLOBAL
        print_keys(runner, res, rev, valstats, out, interval, env, mode, rlist)
def print_check_keys(runner, res, rev, valstats, out, interval, env, rlist):
    """Sanity-check the measured values, then print them.

    Exits when the cpu pmu measured only zeroes (likely broken perf);
    for other pmus this only warns and returns."""
    if res and all([sum(res[k]) == 0.0 and len(res[k]) > 0 for k in res.keys()]) and cpu.cpu == cpu.realcpu:
        if args.subset:
            return
        if runner.pmu == "cpu":
            sys.exit("All measured values 0. perf broken?")
        else:
            if not args.quiet:
                print("Measured values for %s all 0" % runner_name(runner), file=sys.stderr)
            return
    if args.interval and interval is None:
        # no timestamp seen yet
        interval = float('nan')
    if not args.no_output:
        print_and_split_keys(runner, res, rev, valstats, out, interval, env, rlist)
def print_summary(summary, out, runner_list, full_system):
    """Print end-of-run summaries: raw perf values for --perf-summary
    and the accumulated node results for --summary."""
    if args.perf_summary:
        p = summary.summary_perf
        # iterate row-wise over the per-title value columns
        for sv in zip_longest(*p.values()):
            for ind, title in enumerate(p.keys()):
                r = sv[ind]
                l = []
                if args.interval:
                    l.append("\tSUMMARY")
                if full_system:
                    l.append(("CPU" + title) if re.match(r'\d+$', title) else title)
                if output_numcpus:
                    l.append("0") # XXX
                if r is None:
                    continue
                if title.isdecimal():
                    cpunum = int(title)
                    # uncore/power values are reported once per socket only
                    if (r[2].startswith("uncore") or r[2].startswith("power")) and (
                            cpunum != cpu.sockettocpus[cpu.cputosocket[cpunum]][0]):
                        continue
                    if r[2].startswith("duration_time") and cpunum != 0 and not args.cpu and not args.core:
                        continue
                args.perf_summary.write(";".join(l + ["%f" % r[0], r[1],
                                                   r[2], "%f" % r[3],
                                                   "%.2f" % r[4], "", ""]) + "\n")
    if not args.summary:
        return
    for runner, res, rev in runner_split(runner_list, summary.res, summary.rev):
        print_and_split_keys(runner, res, rev,
                             summary.valstats, out,
                             float('nan'), summary.env, runner_list)
def is_outgroup(x):
    # True when every event in x may be scheduled outside a perf group
    return set(x) - ectx.outgroup_events == set()
class SaveContext(object):
    """Save (some) environment context, in this case stdin seek offset to make < file work
    when we reexecute the workload multiple times."""
    def __init__(self):
        try:
            self.startoffset = sys.stdin.tell() # type: int | None
        # merged the two identical except clauses: IOError is an alias of
        # OSError on python 3, and the tuple form stays python 2 safe
        except (OSError, IOError):
            # stdin is not seekable (tty/pipe); nothing to restore later
            self.startoffset = None
    def restore(self):
        """Rewind stdin to the saved offset so a re-run sees the same input."""
        if self.startoffset is not None:
            sys.stdin.seek(self.startoffset)
def execute_no_multiplex(runner_list, out, rest, summary):
    """Measure each event group in its own perf run to avoid multiplexing.

    The workload is re-executed once per non-outgroup group (outgroup
    events ride along with the next group); stdin is rewound between
    runs. Per-run results are merged positionally into the results of
    the first run. Returns the maximum exit code seen."""
    results = [] # type: List[Any]
    groups = []
    num_outg = 0
    for runner in runner_list:
        groups += [g.evnum for g in runner.sched.evgroups]
        num_outg += sum([g.outgroup for g in runner.sched.evgroups])
    num_runs = len(groups) - num_outg
    outg = []
    n = 0
    ctx = SaveContext()
    resoff = Counter() # type: typing.Counter[str]
    RES, REV, INTERVAL, VALSTATS, ENV = range(5)
    ret = 0
    # runs could be further reduced by tweaking
    # the scheduler to avoid any duplicated events
    for runner in runner_list:
        groups = [g.evnum for g in runner.sched.evgroups]
        for g, gg in zip(groups, runner.sched.evgroups):
            if gg.outgroup:
                outg.append(g)
                continue
            print("RUN #%d of %d%s: %s" % (n + 1, num_runs,
                " for %s" % runner_name(runner) if len(runner_list) > 1 else "",
                " ".join([quote(o.name) for o in gg.objl])))
            # becomes results for first iteration
            lresults = results if n == 0 else []
            res = None
            events = outg + [g]
            runner.set_ectx()
            evstr = group_join(events)
            flat_events = flatten(events)
            flat_rmap = [event_rmap(e, runner_list) for e in flat_events]
            runner.clear_ectx()
            for nret, res, rev, interval, valstats, env in do_execute(
                    [runner],
                    summary, evstr, flat_rmap,
                    out, rest, resoff, flat_events):
                ret = max(ret, nret)
                lresults.append([res, rev, interval, valstats, env])
            if res:
                for t in res.keys():
                    resoff[t] += len(res[t])
            if n > 0:
                if len(lresults) != len(results):
                    if not args.quiet:
                        # BUGFIX: arguments were swapped -- results holds the
                        # original (first) run, lresults the current run
                        print("Original run had %d intervals, this run has %d. "
                              "Workload run time not stable?" %
                                (len(results), len(lresults)), file=sys.stderr)
                    if len(lresults) > len(results):
                        # throw away excessive intervals
                        lresults = lresults[:len(results)]
                    else:
                        # fill the missing intervals with dummy data
                        v = lresults[0]
                        for ind, _ in enumerate(results):
                            if ind >= len(lresults):
                                lresults.append([dummy_dict(v[RES]),
                                                 v[REV],
                                                 v[INTERVAL],
                                                 dummy_dict(v[RES], ValStat(0,0)),
                                                 v[ENV]])
                assert len(lresults) == len(results)
                i = 0
                # merge this run's values into the first run's results
                for r, lr in zip(results, lresults):
                    for j in (RES, REV, VALSTATS):
                        append_dict(r[j], lr[j])
                    i += 1
            ctx.restore()
            outg = []
            n += 1
    assert num_runs == n
    for res, rev, interval, valstats, env in results:
        if summary:
            summary.add(res, rev, valstats, env)
        for runner, res, rev in runner_split(runner_list, res, rev):
            print_check_keys(runner, res, rev, valstats, out, interval, env, runner_list)
    return ret
def runner_split(runner_list, res, rev):
    """Slice global perf results into per-runner (per-pmu) views.

    Aggregated results (single "" title) are sliced by each runner's
    scheduler offset; per-cpu results are filtered by the runner's
    cpu list; otherwise the results are passed through unchanged."""
    for r in runner_list:
        if len(res.keys()) == 1 and "" in res:
            off = r.sched.offset
            end = off + len(r.sched.evnum)
            yield r, { "": res[""][off:end]}, { "": rev[""][off:end] }
        elif r.cpu_list:
            d = defaultdict(list) # type: DefaultDict[str,list]
            d.update({ "%d" % k: res["%d" % k] for k in r.cpu_list })
            yield r, d, rev
        else:
            yield r, res, rev
def execute(runner_list, out, rest, summary):
    """Measure all runners' event groups in a single perf run and print results.

    Builds one combined perf event string across runners, runs it via
    do_execute, and prints/accumulates each yielded interval."""
    evstr, flat_events, flat_rmap = "", [], []
    for runner in runner_list:
        new_events = [x.evnum for x in runner.sched.evgroups if len(x.evnum) > 0]
        if len(new_events) == 0:
            continue
        runner.set_ectx()
        if evstr:
            evstr += ","
        evstr += group_join(new_events)
        new_flat_events = flatten(new_events)
        flat_events += new_flat_events
        flat_rmap += [event_rmap(e, runner_list) for e in new_flat_events]
        runner.clear_ectx()
    ctx = SaveContext()
    for ret, res, rev, interval, valstats, env in do_execute(
            runner_list, summary,
            evstr, flat_rmap, out, rest, Counter(), None):
        if summary:
            summary.add(res, rev, valstats, env)
        for runner, res, rev in runner_split(runner_list, res, rev):
            print_check_keys(runner, res, rev, valstats, out, interval, env, runner_list)
    ctx.restore()
    return ret
def find_group(num, runner_list):
    """Return the Group containing flat event index num, or None."""
    # first locate the runner whose event range covers num
    offset = 0
    for runner in runner_list:
        if num - offset < len(runner.sched.evnum):
            break
        offset += len(runner.sched.evnum)
    num -= offset
    groups = runner.sched.evgroups
    # groups are ordered by base offset, so bisect for the candidate
    g = groups[bisect.bisect_right(groups, GroupCmp(num)) - 1]
    if g.base <= num < g.base + len(g.evnum):
        return g
    warn("group for event %d not found" % num)
    return None
def dump_raw(valcsv, interval, title, event, ename, val, index, stddev, multiplex, runner_list):
    """Log one raw perf measurement to --raw stdout and/or the --valcsv file."""
    if index < 0:
        return
    g = find_group(index, runner_list)
    if g is None:
        return
    # nodes in the group that reference this event
    nodes = " ".join(sorted([o.name.replace(" ", "_") for o in g.objl if event in o.evnum]))
    if args.raw:
        print("raw", title, "event", event, "val", val, "ename", ename, "index",
                index, "group", g.num, "nodes", nodes)
    if args.valcsv:
        valcsv.writerow((interval, title, g.num, ename, val, event, index,
                         stddev, multiplex, nodes))
def group_join(events):
    """Join event-number groups into a single perf event list string.

    Inserts a dummy emulation-faults event when a group starts with the
    same event the previous group ended with, so perf stat does not
    merge them."""
    e = ""
    last = None
    sep = ""
    for j in events:
        e += sep
        # add dummy event to separate identical events to avoid merging
        # in perf stat
        if last == j[0] and sep:
            e += "emulation-faults,"
        e += event_group(j)
        sep = ","
        last = j[-1]
    return e
def update_perf_summary(summary, off, title, val, event, unit, multiplex):
    """Accumulate one perf measurement into summary.summary_perf.

    Each title maps to a list of [value, unit, event, enabled, multiplex]
    rows; the row at offset off is created on first sight and summed
    into on later calls."""
    rows = summary.summary_perf.setdefault(title, [])
    if off >= len(rows):
        rows.append([val, unit, event, 0, multiplex])
        return
    row = rows[off]
    row[0] += val
    assert row[1] == unit
    assert row[2] == event or event == "dummy"
    row[3] = min(row[3], multiplex)
def find_runner(rlist, off, title, event):
    """Map a perf output line (title, event offset) to its owning runner.

    Returns (runner, runner-local offset), or (None, 0) when the line
    belongs to no runner (e.g. an event leaked from another hybrid pmu)."""
    if len(rlist) == 1 and rlist[0].pmu == "cpu":
        return rlist[0], off
    for r in rlist:
        if title == "":
            if r.sched.offset <= off < r.sched.offset+len(r.sched.evnum):
                return r, off - r.sched.offset
        elif r.cpu_list:
            # in the per cpu case each hybrid cpu has its own line, so no offsets for the runners
            # but need to handle leaks below
            if int(title) in r.cpu_list:
                # For hybrid, non cpu events like msr/tsc/ get expanded over all CPUs.
                # and leak into the other runner who doesn't know anything about them.
                # XXX check does not handle dups
                if not event.startswith("cpu") and (off >= len(r.sched.evnum) or event != r.sched.evnum[off]):
                    return None, 0
                return r, off
        else:
            return r, off
    return None, 0
def perf_name(e):
    """Extract the name=... alias from a perf event string, or None."""
    found = re.search(r'name=([^,/]+)', e)
    return found.group(1) if found else None
# return codes of check_event
FINE = 0   # event matches the schedule; consume the line
SKIP = 1   # expected event not printed by perf; caller reuses the line
FUZZY = 2  # fuzzy input mode; value filled in later from the fallback map
def check_event(rlist, event, off, title, prev_interval, l, revnum, linenum, last_linenum):
    """Check that an event parsed from perf output matches the schedule.

    Returns (runner, action, expected_event) with action one of FINE,
    SKIP (expected event missing in perf output; caller should reuse
    the line) or FUZZY (fuzzy input mode fills the value in later).
    Exits on unexplainable schedule mismatches."""
    r, off = find_runner(rlist, off, title, event)
    if r is None:
        return r, FINE, event
    # likely event expanded over all CPUs
    if event.startswith("cpu") and not event.startswith(r.pmu):
        if args.debug:
            print("event wrong pmu", event, title, r.pmu)
        return None, FINE, event
    # cannot check because it's an event that needs to be expanded first
    if not event.startswith("cpu") and title.isdecimal() and int(title) not in r.cpu_list:
        return r, FINE, event
    if revnum is None:
        revnum = r.sched.evnum
    if event.startswith("uncore"):
        # strip the uncore box instance number for comparison
        event = re.sub(r'_[0-9]+', '', event)
    try:
        expected_ev = remove_qual(revnum[off])
    except IndexError:
        if FUZZYINPUT:
            expected_ev = event
        else:
            sys.exit("Out of range event %s offset %d (len %d). %s" % (event, off, len(revnum),
                "Mismatch in toplev arguments from recording?" if args.import_ else ""))
    if event != expected_ev:
        en = perf_name(expected_ev)
        if en == event:
            return r, FINE, expected_ev
        # work around perf bug that incorrectly expands uncore events in some versions
        if off > 0 and event == remove_qual(revnum[off - 1]):
            return None, FINE, expected_ev
        if FUZZYINPUT:
            return r, FUZZY, expected_ev
        # some perf version don't output <not counted/supported due to dd15480a3d67
        # if the event is expected within a small window assume it's not counted
        # and reuse the value for the next
        near = revnum[off:off+PERF_SKIP_WINDOW]
        for j in near:
            if event == remove_qual(j):
                if args.debug:
                    print("skipping, expected", expected_ev, "got", event,
                            "off", off, "title", title,
                            "context", revnum[off:off+PERF_SKIP_WINDOW])
                if linenum == last_linenum[0]: # avoid endless loop
                    return r, FINE, expected_ev
                last_linenum[0] = linenum
                return r, SKIP, expected_ev
        print("Event in input does not match schedule (%s vs expected %s [pmu:%s/ind:%d/tit:%s/int:%f+%d])." % (
            event, expected_ev, r.pmu, off, title, prev_interval, linenum),
            file=sys.stderr)
        sys.stdout.write(l)
        if args.import_:
            sys.exit("Different arguments than original toplev?")
        sys.exit("Input corruption")
    return r, FINE, event
def update_missing(res, rev, valstats, fallback):
    """Fill NaN results (from FUZZY event matches) from the fallback map.

    fallback maps (title, event-as-printed) to (value, ValStat) gathered
    while parsing; a perf name= alias is tried as a second key."""
    for k in rev.keys():
        for ind, event in enumerate(rev[k]):
            if not isnan(res[k][ind]):
                continue
            key = (k, event)
            if key in fallback:
                if args.debug:
                    print("updating fuzzy event", k, event, fallback[key][0])
                res[k][ind] = fallback[key][0]
                valstats[k][ind] = fallback[key][1]
                continue
            pn = perf_name(event)
            if pn:
                key = (k, pn)
                if key in fallback:
                    res[k][ind] = fallback[key][0]
                    valstats[k][ind] = fallback[key][1]
                    if args.debug:
                        print("updating", k, rev[k][ind])
                    continue
            if not args.quiet:
                print("Cannot find value for", k, rev[k][ind], pn, "in input")
            res[k][ind] = float("nan")
    assert not any([x is None for x in res[k]])
def do_execute(rlist, summary, evstr, flat_rmap, out, rest, resoff, revnum):
    """Run perf (or read imported data) and yield parsed results.

    Generator yielding (ret, res, rev, interval, valstats, env) once per
    completed measurement interval and once at end of output. res/rev/
    valstats are per-title lists of values/event names/statistics."""
    res = defaultdict(list) # type: DefaultDict[str,List[float]]
    rev = defaultdict(list) # type: DefaultDict[str, List[str]]
    valstats = defaultdict(list) # type: DefaultDict[str,List[ValStat]]
    env = {} # type: Dict[str,str]
    account = defaultdict(Stat) # type: DefaultDict[str,Stat]
    inf, prun = setup_perf(evstr, rest)
    prev_interval = 0.0
    interval = None
    interval_dur = 0.0
    linenum = 1
    skip = False
    last_linenum = [0]
    fallback = {} # type: Dict[Tuple[str,str], Tuple[float, ValStat]]
    need_fallback = False
    if not args.import_ and not args.interval:
        start = time.time()
    # main parse loop over perf stat CSV lines
    while True:
        if skip:
            # when skipping reuse line from last iteration
            pass
        else:
            try:
                prun.store_offset()
                l = inf.readline()
                origl = l
                if not l:
                    break
                # some perf versions break CSV output lines incorrectly for power events
                if l.endswith("Joules"):
                    l2 = inf.readline()
                    l = l + l2.strip()
                if l.startswith("#") or l.strip() == "":
                    linenum += 1
                    skip = False
                    continue
            except OSError:
                # handle pty EIO
                break
            except IOError:
                break
            except KeyboardInterrupt:
                continue
        if re.match(r'^(Timestamp|Value|Location)', l):
            # header generated by toplev in import mode. ignore.
            linenum += 1
            skip = False
            continue
        if prun.skip_first_line():
            skip = False
            continue
        origl = l
        if args.interval:
            # strip the leading timestamp; an interval change flushes results
            m = re.match(r"\s*([0-9.]{9,}|SUMMARY);(.*)", l)
            if m:
                interval = float(m.group(1)) if m.group(1) != "SUMMARY" else 0.0
                l = m.group(2)
                if interval != prev_interval:
                    linenum = 1
                    # skip the first because we can't tell when it started
                    if prev_interval != 0.0 and prun.next_timestamp():
                        interval_dur = interval - prev_interval
                        interval = prev_interval
                        break
                    if res:
                        interval_dur = interval - prev_interval
                        set_interval(env, interval_dur, prev_interval)
                        if need_fallback:
                            update_missing(res, rev, valstats, fallback)
                            fallback = {}
                            need_fallback = False
                        yield 0, res, rev, prev_interval, valstats, env
                        res = defaultdict(list)
                        rev = defaultdict(list)
                        valstats = defaultdict(list)
                    prev_interval = interval
                    start = interval
        elif not l[:1].isspace():
            # these are likely bogus summary lines printed by v5.8 perf stat
            # just ignore
            skip = False
            continue
        if prun.skip_input():
            continue
        if args.perf_output:
            args.perf_output.write(origl.rstrip() + "\n")
        n = l.split(";")
        # filter out the empty unit field added by 3.14
        n = [x for x in n if x not in ('', 'Joules', 'ns')]
        # timestamp is already removed
        # -a --per-socket socket,numcpus,count,event,...
        # -a --per-core core,numcpus,count,event,...
        # -a -A cpu,count,event,...
        # count,event,...
        if is_event(n, 1):
            title, count, event, off = "", n[0], n[1], 2
        elif is_event(n, 3):
            title, count, event, off = n[0], n[2], n[3], 4
        elif is_event(n, 2):
            title, count, event, off = n[0], n[1], n[2], 3
        else:
            if not FUZZYINPUT:
                warn("unparseable perf output\n%s" % origl.rstrip())
            linenum += 1
            skip = False
            continue
        # dummy event used as separator to avoid merging problems
        if event.startswith("emulation-faults"):
            linenum += 1
            skip = False
            continue
        title = title.replace("CPU", "")
        # code later relies on stripping ku flags
        event = remove_qual(event)
        event = re.sub(r'\s+\[.*\]', '', event)
        # duplicated duration_time in perf ~6.5. was already added from the first.
        if event == "duration_time" and count == "<not counted>":
            linenum += 1
            skip = False
            continue
        skip = False
        origevent = event
        runner, action, event = check_event(rlist, event, len(res[title]),
                                            title, prev_interval, origl, revnum, linenum, last_linenum)
        if runner is None:
            linenum += 1
            continue
        if action == SKIP:
            l = origl
            skip = True
        if action == FUZZY:
            need_fallback = True
        multiplex = float('nan')
        event = event.rstrip()
        if re.match(r"\s*[0-9.]+", count):
            val = float(count)
        elif re.match(r"\s*<", count):
            # <not counted> / <not supported>: account the error, use 0
            account[event].errors[count.replace("<","").replace(">","")] += 1
            multiplex = 0.
            val = 0
        else:
            warn("unparseable perf count\n%s" % l.rstrip())
            linenum += 1
            continue
        # post fixes:
        # ,xxx% -> -rXXX stddev
        stddev = 0.
        if len(n) > off and n[off].endswith("%") and not skip:
            stddev = (float(n[off].replace("%", "").replace(",", ".")) / 100.) * val
            off += 1
        # ,xxx,yyy -> multiplexing in newer perf
        if len(n) > off + 1 and not skip:
            multiplex = float(n[off + 1].replace(",", "."))
            off += 2
        st = ValStat(stddev=stddev, multiplex=multiplex)
        account[event].total += 1
        def ignored_cpu(num):
            return num not in runner.cpu_list and (num not in cpu.cputocore or not any(
                [k in runner.cpu_list for k in cpu.coreids[cpu.cputocore[num]]]))
        def add(t):
            if runner.cpu_list and t.isdecimal() and ignored_cpu(int(t)):
                return
            if skip:
                res[t].append(0.0)
            elif action == FUZZY:
                res[t].append(float("nan"))
            else:
                res[t].append(val)
                fallback[(t, origevent)] = (val, st)
            rev[t].append(event)
            valstats[t].append(st)
            if args.perf_summary:
                # XXX add unit, enabled, num-cpus
                assert len(res[t]) == len(rev[t])
                update_perf_summary(summary, resoff[t] + len(res[t]) - 1, t, val, event, "", multiplex)
        def dup_val(l):
            for j in l:
                if j in runner.cpu_list:
                    add("%d" % j)
        def uncore_event(event):
            return re.match(r'power|uncore', event)
        # power/uncore events are only output once for every socket
        if ((uncore_event(event) or uncore_event(origevent)) and
                title.isdecimal() and
                (not ((args.core or args.cpu) and not args.single_thread))):
            cpunum = int(title)
            socket = cpu.cputosocket[cpunum]
            dup_val(cpu.sockettocpus[socket])
        elif re.match(r'(S\d+-)?(D\d+-)?C\d+', title) and (smt_mode or args.no_aggr):
            m = re.match(r'(?:S(\d+)-)?(?:D(\d+)-)?C(\d+)', title)
            assert m is not None
            if m.group(2): # XXX
                warn_once("die topology not supported currently")
            socket, core = int(m.group(1)), int(m.group(3))
            dup_val(cpu.coreids[(socket, core)])
        # duration time is only output once, except with --cpu/-C (???)
        # except perf 6.2+ outputs it with -A on all cpus, but not counting except the first
        elif ((event.startswith("duration_time") or origevent.startswith("duration_time"))
                and title.isdecimal() and not args.cpu and not args.core):
            dup_val(runner.cpu_list)
        else:
            add(title)
        if skip:
            continue
        linenum += 1
        if args.raw or args.valcsv:
            dump_raw(out.valcsv,
                     interval if args.interval else "",
                     title,
                     event,
                     event_rmap(event, rlist),
                     val if val or not re.match(r"\s*<", count) else count,
                     len(res[title]) - 1,
                     stddev, multiplex, rlist)
    inf.close()
    if not args.import_ and not args.interval:
        set_interval(env, time.time() - start, start)
    elif args.interval:
        set_interval(env, interval_dur if interval_dur else args.interval/1000.,
                     interval if interval else float('NaN'))
    else:
        warn_no_assert("cannot determine time duration. Per second metrics may be wrong. Use -Ixxx.")
        set_interval(env, 0, 0)
    ret = prun.wait()
    print_account(account)
    if need_fallback:
        update_missing(res, rev, valstats, fallback)
    yield ret, res, rev, interval, valstats, env
# gate for scheduling level 1 nodes in parallel with other groups (see adjust_ev)
run_l1_parallel = False # disabled for now until we can fix the perf scheduler
def adjust_ev(ev, level):
    """Rewrite TOPDOWN.SLOTS to its programmable-counter variant when the
    fixed slots counter cannot (or should not) be used."""
    # use the programmable slots for non L1 so that level 1
    # can (mostly) run in parallel with other groups.
    # this also helps for old or non ICL kernels
    # XXX this creates groups without slots leader
    if isinstance(ev, str) and ev.startswith("TOPDOWN.SLOTS") and ((run_l1_parallel and level != 1) or not ectx.slots_available):
        ev = ev.replace("TOPDOWN.SLOTS", "TOPDOWN.SLOTS_P")
    return ev
def ev_collect(ev, level, obj):
    """Collect event ev at level into obj's event list (schedule build phase).

    Returns a DummyArith placeholder; real values are resolved later by
    lookup_res."""
    if isinstance(ev, types.LambdaType):
        # recursively collect the events the lambda references
        return ev(lambda ev, level: ev_collect(ev, level, obj), level)
    if ev == "mux":
        return DummyArith()
    if ev.startswith("interval-"):
        if not feat.supports_duration_time:
            return DummyArith()
        ev = "duration_time"
    ev = adjust_ev(ev, level)
    if level == 999:
        # magic level: value comes from the retirement latency table
        if ret_latency is None:
            setup_retlatency(args)
        return DummyArith()
    key = (ev, level, obj.name)
    if key not in obj.evlevels:
        if ev.startswith(("TOPDOWN.SLOTS", "PERF_METRICS.")):
            # keep TOPDOWN.SLOTS ahead of PERF_METRICS events at this level
            ind = [x[1] == level for x in obj.evlevels]
            ins = ind.index(True) if any(ind) else 0
            obj.evlevels.insert(ins + (0 if ev.startswith("TOPDOWN.SLOTS") else 1), key)
        else:
            obj.evlevels.append(key)
    if safe_ref(obj, 'nogroup') or ev == "duration_time":
        ectx.outgroup_events.add(ev.lower())
    return DummyArith()
def canon_event(e):
    """Canonicalize an event name: drop any :modifier suffix, lowercase."""
    head, colon, _tail = e.partition(":")
    return (head if colon else e).lower()
def find_runner_by_pmu(pmu, runner_list):
    """Return the first runner measuring the given pmu, or None."""
    return next((runner for runner in runner_list if runner.pmu == pmu), None)
def event_pmu(ev):
    """Return the pmu prefix of a pmu/.../ style event string, or None."""
    head, slash, _rest = ev.partition("/")
    return head if slash else None
def event_ectx(ev, runner_list):
    """Return the event context that owns event ev (hybrid pmu aware)."""
    pmu = event_pmu(ev)
    if pmu and pmu.startswith("cpu"):
        r = find_runner_by_pmu(pmu, runner_list)
        if r:
            return r.ectx
    # this works for now because the atom model doesn't use any uncore events
    # may need to fix later once it does
    return ectx if ectx else runner_list[0].ectx
def do_event_rmap(e, ectx_):
    """Reverse-map perf event e to its logical name; "dummy" when unknown."""
    n = ectx_.emap.getperf(e)
    if ectx_.emap.getevent(n, nocheck=event_nocheck()):
        return n
    if e in non_json_events:
        return e
    if e.startswith("uncore"):
        # missing uncore events are common enough to not assert on
        warn_no_assert("rmap: cannot find %s, using dummy" % e)
    else:
        warn("rmap: cannot find %s, using dummy" % e)
    return "dummy"
def event_rmap(e, runner_list):
    """Reverse-map perf event e to its logical name, with per-context cache."""
    ctx = event_ectx(e, runner_list)
    if e not in ctx.rmap_cache:
        ctx.rmap_cache[e] = do_event_rmap(e, ctx)
    return ctx.rmap_cache[e]
# different spellings of the unhalted core cycles event across pmus and
# raw encodings; compare_event treats any pair of these as equal
cycles_aliases = frozenset(("cycles", "cpu_clk_unhalted.thread", "cpu_clk_unhalted.core",
                            "cpu_core/event=0x3c,umask=0x0/", "cpu_atom/event=0x3c,umask=0x0/",
                            "cpu/event=0x3c,umask=0x0/"))
# compare events to handle name aliases
def compare_event(aname, bname):
    """True when the two event names resolve to the same hardware event."""
    # XXX this should be handled in ocperf
    if aname in cycles_aliases and bname in cycles_aliases:
        return True
    a = ectx.emap.getevent(aname, nocheck=event_nocheck())
    if a is None:
        return False
    b = ectx.emap.getevent(bname, nocheck=event_nocheck())
    if b is None:
        return False
    # compare the fields that define the raw encoding
    fields = ('val','event','cmask','edge','inv')
    return map_fields(a, fields) == map_fields(b, fields)
def is_hybrid():
    # keys off the presence of /sys/devices/cpu/format/any in sysfs
    # NOTE(review): confirm the polarity matches hybrid detection on
    # current kernels (hybrid systems expose cpu_core/cpu_atom pmus)
    return ocperf.file_exists("/sys/devices/cpu/format/any")
def lookup_res(res, rev, ev, obj, env, level, referenced, cpuoff, st, runner_list):
    """get measurement result, possibly wrapping in UVal"""
    if level == 999:
        # magic level: value comes from the retirement latency table
        return lookup_retlat(ev)
    ev = adjust_ev(ev, level)
    if isinstance(ev, str) and ev.startswith("interval") and feat.supports_duration_time:
        # interval-* pseudo events scale the measured duration_time
        scale = { "interval-s": 1e9,
                  "interval-ns": 1,
                  "interval-ms": 1e6 }[ev]
        return lookup_res(res, rev, "duration_time", obj, env, level, referenced, cpuoff, st, runner_list)/scale
    if ev in env:
        return env[ev]
    if ev == "mux":
        return min([s.multiplex for s in st])
    #
    # when the model passed in a lambda run the function for each logical cpu
    # (by resolving its EVs to only that CPU)
    # and then sum up. This is needed for the workarounds to make various
    # per thread counters at least as big as unhalted cycles.
    #
    # otherwise we always sum up.
    #
    if isinstance(ev, types.LambdaType):
        return sum([ev(lambda ev, level:
                lookup_res(res, rev, ev, obj, env, level, referenced, off, st, runner_list), level)
                for off in range(env['num_merged'])])
    index = obj.res_map[(ev, level, obj.name)]
    referenced.add(index)
    #print((ev, level, obj.name), "->", index)
    if not args.fast:
        # cross-check that the scheduled event really is at this index
        try:
            r = rev[index]
        except IndexError:
            warn_once_no_assert("Not enough lines in perf output for rev (%d vs %d for %s) at %s, event %s" %
                    (index, len(rev), obj.name, env['interval'], ev))
            return 0
        rmap_ev = event_rmap(r, runner_list).lower()
        ev = ev.lower()
        assert (rmap_ev == canon_event(ev).replace("/k", "/") or
                compare_event(rmap_ev, ev) or
                rmap_ev == "dummy" or
                (rmap_ev.endswith("_any") and not is_hybrid())), "event rmap mismatch %s vs %s" % (rmap_ev, ev)
    try:
        vv = res[index]
    except IndexError:
        warn_once("Not enough lines in perf output for res (%d vs %d for %s) at %s" %
                (index, len(res), obj.name, env['interval']))
        return 0.0
    if isinstance(vv, tuple):
        # tuple holds per-SMT-thread values: sum unless one thread requested
        if cpuoff == -1:
            vv = sum(vv)
        else:
            try:
                vv = vv[cpuoff]
            except IndexError:
                warn_once("Partial CPU thread data from perf for %s" %
                        obj.name)
                return 0.0
    if st[index].stddev or st[index].multiplex != 100.0:
        # carry the measurement uncertainty along in a UVal
        return UVal(name=ev, value=vv, stddev=st[index].stddev, mux=st[index].multiplex)
    return vv
class BadEvent(Exception):
    """Raised when a sample event cannot be resolved in the event map."""
    def __init__(self, name):
        # pass the name to Exception so str(exc) is meaningful (the old
        # super(Exception, self).__init__() named the wrong class and
        # dropped the message); keep .event for existing callers
        super(BadEvent, self).__init__(name)
        self.event = name
# XXX check for errata
def sample_event(e, emap):
    """Map an internal event name to its perf sampling name plus any
    --ring-filter suffix. Raises BadEvent for unresolvable events."""
    resolved = emap.getevent(e, nocheck=event_nocheck())
    if not resolved:
        raise BadEvent(e)
    postfix = (":" + args.ring_filter) if args.ring_filter else args.ring_filter
    return resolved.name + postfix
def sample_desc(s, emap):
    """Return a space-joined sampling event string, or "" if any event
    in s is unknown (warned once)."""
    try:
        names = [sample_event(x, emap) for x in s]
        return " ".join(names)
    except BadEvent as e:
        warn_once_no_assert("Unknown sample event %s" % (e.event))
        return ""
def get_level(x):
    """Return the level field (index 1) of an evlevel tuple."""
    level = x[1]
    return level
def get_levels(evlev):
    """Return the level component (index 1) of each evlevel tuple."""
    return list(map(lambda entry: entry[1], evlev))
def get_names(evlev):
    """Return the event name component (index 0) of each evlevel tuple."""
    return list(map(lambda entry: entry[0], evlev))
def full_name(obj):
    """Return the dotted hierarchical name of a node, walking up parents."""
    parts = [obj.name]
    node = obj
    while node.__dict__.get('parent'):
        node = node.parent
        parts.append(node.name)
    return ".".join(reversed(parts))
def package_node(obj):
    # nodes measured per package or system-wide rather than per core/thread
    return safe_ref(obj, 'domain') in ("Package", "SystemMetric")
def not_package_node(obj):
    # selector complement of package_node
    return not package_node(obj)
def core_node(obj):
    # nodes whose domain is measured per physical core
    return safe_ref(obj, 'domain') in ectx.core_domains
def thread_node(obj):
    """A node is per-thread when it is neither package- nor core-scoped."""
    return not (package_node(obj) or core_node(obj))
def any_node(obj):
    # node selector that accepts every node
    return True
def obj_domain(obj):
    """Return the node's domain with long qualifiers abbreviated."""
    domain = obj.domain
    for long_form, short_form in (("Estimated", "est"), ("Calculated", "calc")):
        domain = domain.replace(long_form, short_form)
    return domain
def metric_unit(obj):
    """Return the unit string to display for a metric node."""
    if has(obj, 'unit'):
        return obj.unit
    if has(obj, 'domain'):
        return obj_domain(obj).replace("SystemMetric", "SysMetric")
    return "Metric"
def obj_desc(obj, sep="\n\t"):
    """Return the node description, trimmed to the first sentence unless
    --long-desc is given."""
    desc = obj.desc[1:].replace("\n", sep)
    # by default limit to first sentence
    if not args.long_desc and "." in desc:
        desc = desc[:desc.find(".") + 1] + ".."
    return desc
def get_mg(obj):
    # metric groups of a node, empty frozenset when unset
    return ref_or(obj, 'metricgroup', frozenset([]))
# only check direct children, the rest are handled recursively
def children_over(l, obj):
    """True if any direct child of obj in list l crossed its threshold."""
    return any(o.thresh for o in l
               if 'parent' in o.__dict__ and o.parent == obj)
def bottleneck_related(obj, bn):
    """True if obj is bn or shares a non-TMA metric group with it."""
    if obj == bn:
        return True
    if (get_mg(bn) & get_mg(obj)) - tma_mgroups:
        return True
    return False
def obj_desc_runtime(obj, rest, bn):
    """Return the description to print for obj; suppressed when children
    are also printed or the node is unrelated to the bottleneck."""
    # hide description if children are also printed
    if children_over(rest, obj) or (not args.desc and not bottleneck_related(obj, bn)):
        desc = ""
    else:
        desc = obj_desc(obj)
    if 'htoff' in obj.__dict__ and obj.htoff and obj.thresh and cpu.ht and not args.single_thread:
        desc += """
Warning: Hyper Threading may lead to incorrect measurements for this node.
Suggest to re-measure with HT off (run cputop.py "thread == 1" offline | sh)."""
    return desc
def node_filter(obj, default, sibmatch, mgroups):
    """Decide from --nodes whether obj is selected.

    Entries may force-add (+), remove (^/-), cap by level (/N), and a
    trailing ^ also pulls in siblings / metric groups. sibmatch and
    mgroups are updated in place. Returns True/False, or default when
    no entry decides."""
    if args.nodes:
        fname = full_name(obj)
        name = obj.name
        def _match(m):
            return (fnmatch(name, m) or
                    fnmatch(fname, m) or
                    fnmatch(fname, "*." + m))
        def match(m, checklevel=True):
            if m.endswith("^"):
                m = m[:-1]
            r = re.match("(.*)/([0-9]+)", m)
            if r:
                level = int(r.group(2))
                if checklevel and obj.level > level:
                    return False
                m = r.group(1)
            return _match(m)
        def has_siblings(j, obj):
            return j.endswith("^") and 'sibling' in obj.__dict__ and obj.sibling
        def has_mg(j, obj):
            return j.endswith("^") and get_mg(obj)
        nodes = args.nodes
        if nodes[0] == '!':
            default = False
            nodes = nodes[1:]
        for j in nodes.split(","):
            j = j.strip()
            if j == "":
                continue
            i = 0
            if j[0] == '^' or j[0] == '-':
                if match(j[1:]):
                    return False
                continue
            elif j[0] == '+':
                i += 1
            if match(j[i:], True):
                if has_siblings(j, obj):
                    sibmatch |= set(obj.sibling)
                if has_mg(j, obj):
                    mgroups |= obj.metricgroup
                obj.forced = True
                return True
            if has_siblings(j, obj):
                for sib in obj.sibling:
                    # NOTE: rebinding fname/name here changes what the
                    # _match closure compares against -- it now tests sib
                    fname = full_name(sib)
                    name = sib.name
                    if match(j[i:], False):
                        sibmatch.add(obj)
                        return True
    return default
def _find_bn(bn, level):
    """Descend the node tree picking the highest-value sibling per level.

    Returns the deepest unambiguous bottleneck candidate, or None when a
    level is empty or the top two siblings are within SIB_THRESH."""
    siblings = sorted([x for x in bn if x.level - 1 == level], key=lambda x: x.val, reverse=True)
    # remove overlap nodes
    siblings = [x for x in siblings if not (has(x, 'overlap') and x.overlap)]
    # BUGFIX: check emptiness after the overlap filter too -- previously
    # a level consisting only of overlap nodes raised IndexError below
    if len(siblings) == 0:
        return None
    # ambiguous when the top two are too close to call
    if level > 0 and len(siblings) > 1 and siblings[0].val - siblings[1].val <= SIB_THRESH:
        return None
    n = _find_bn([x for x in bn if full_name(x).startswith(full_name(siblings[0]))], level + 1)
    if n is None:
        return siblings[0]
    return n
def find_bn(olist, match):
    """Pick the bottleneck node from olist, honoring --force-bn."""
    if args.force_bn:
        # deepest forced node wins
        bn = sorted([o for o in olist if o.name in args.force_bn], key=lambda x: x.level, reverse=True)
        if bn:
            return bn[0]
    # only non-metric nodes over threshold are candidates
    bn = [o for o in olist if match(o) and not o.metric and o.thresh]
    if not bn:
        return None
    return _find_bn(bn, 0)
# cache of pmu names already found to be missing in sysfs (see missing_pmu)
pmu_does_not_exist = set()
# XXX check if PMU can be accessed from current user
def missing_pmu(e):
    """True when the event's pmu directory does not exist in sysfs (cached)."""
    if event_nocheck():
        return False
    m = re.match(r"([a-z0-9_]+)/", e)
    if m:
        pmu = m.group(1)
        if pmu in pmu_does_not_exist:
            return True
        if not os.path.isdir("/sys/devices/%s" % pmu):
            pmu_does_not_exist.add(pmu)
            return True
    return False
def query_errata(obj, errata_events, errata_nodes, errata_names):
errata = [errata_events[x] for x in obj.evlist if x in errata_events]
if any(errata):
errata_nodes.add(obj)
errata_names |= set(errata)
def olist_by_metricgroup(l, mg):
    """Reorder nodes so members of the same metric group are adjacent,
    preserving first-seen order. No-op with --no-sort."""
    if args.no_sort:
        return l
    valid = set(l)
    visited = set()
    ml = []
    for obj in l:
        if obj in visited:
            continue
        def add(obj):
            if obj not in visited:
                ml.append(obj)
                visited.add(obj)
        if has(obj, 'metricgroup'):
            # pull in all group members before the node itself
            for g in sorted(obj.metricgroup):
                for j in mg[g]:
                    if j in valid:
                        add(j)
        add(obj)
    return ml
def node_unit(obj):
    # domain label suffix for a node, or empty when it has no domain
    return (" " + obj_domain(obj)) if has(obj, 'domain') else ""
def node_below(obj):
    """True when the node did not cross its display threshold."""
    return False if obj.thresh else True
class Summary(object):
    """Accumulate counts for summary."""
    def __init__(self):
        # per-title accumulated values / event names / statistics
        self.res = defaultdict(list)
        self.rev = defaultdict(list)
        self.env = Counter() # type: typing.Counter[str]
        self.valstats = defaultdict(list)
        # raw values for --perf-summary, keyed by title
        self.summary_perf = OrderedDict()
    def add(self, res, rev, valstats, env):
        """Merge one interval's results into the running sums."""
        for j in sorted(res.keys()):
            for ind, val in enumerate(res[j]):
                if ind < len(self.res[j]):
                    # position already known: sum value, merge statistics
                    self.res[j][ind] += val
                    self.valstats[j][ind] = combine_valstat([self.valstats[j][ind], valstats[j][ind]])
                else:
                    self.res[j].append(val)
                    self.valstats[j].append(valstats[j][ind])
        if len(rev.keys()) == 1:
            # aggregated mode: event lists are appended per run
            append_dict(self.rev, rev)
        else:
            self.rev.update(rev)
        for j in env.keys():
            self.env[j] += env[j]
def parse_metric_group(l, mg):
    """Parse a comma-separated metric group list into node name lists.

    A '-' or '^' prefix marks removal, an optional '+' marks addition.
    Unknown groups are reported to stderr and skipped.
    Returns (add_names, remove_names)."""
    if l is None:
        return [], []
    added, removed = [], []
    for entry in l.split(","):
        negate = entry[0:1] in ('-', '^')
        group = entry[1:] if negate or entry[0:1] == '+' else entry
        if group not in mg:
            print("metric group", group, "not found", file=sys.stderr)
            continue
        names = [x.name for x in mg[group]]
        if negate:
            removed += names
        else:
            added += names
    return added, removed
def obj_area(obj):
    # node's area attribute, or None when unset or suppressed by --no-area
    return obj.area if has(obj, 'area') and not args.no_area else None
def get_parents(obj):
    """Return the chain of ancestors of obj, nearest parent first."""
    ancestors = []
    node = obj.__dict__.get('parent')
    while node:
        ancestors.append(node)
        node = node.__dict__.get('parent')
    return ancestors
def quote(s):
    """Wrap s in double quotes when it contains a space."""
    return '"' + s + '"' if " " in s else s
class Group(object):
    """A set of perf events scheduled together, plus the nodes using them."""
    def __init__(self, evnum, objl, num, outgroup=False):
        self.num = num            # sequential group id
        self.evnum = evnum        # raw event strings in this group
        self.objl = set(objl)     # nodes referencing events in this group
        self.outgroup = outgroup  # scheduled outside a regular group
        self.base = -1            # offset into the flat event list, set later
class GroupCmp(object):
    """Comparison key to bisect a Group list by base offset."""
    def __init__(self, v):
        self.v = v
    def __lt__(self, g):
        # compare the raw offset against the group's base
        return self.v < g.base
# Control whether even unrelated groups can be merged
any_merge = True
# Interleave uncore (outgroup) events between CPU groups; see do_distribute_uncore
distribute_uncore = False
def grab_group(l):
    """Return how many leading events of l fit into the available counters."""
    if needed_counters(l) <= ectx.counters:
        return len(l)
    n = 1
    # grow the prefix until it no longer fits, then back off one
    while needed_counters(l[:n]) < ectx.counters and n < len(l):
        n += 1
    if needed_counters(l[:n]) > ectx.counters and n > 0:
        n -= 1
    assert needed_counters(l[:n]) <= ectx.counters
    return n
def update_group_map(evnum, obj, group):
    """Point obj's event levels at (group, index) for events present in evnum."""
    for lev in obj.evlevels:
        r = raw_event(lev[0])
        # can happen during splitting
        # the update of the other level will fix it
        if r in evnum and lev not in obj.group_map:
            obj.group_map[lev] = (group, evnum.index(r))
def do_distribute_uncore(evgroups):
    """Interleave outgroup (uncore) groups between the CPU groups."""
    cpu_groups, out_groups = [], []
    for g in evgroups:
        (out_groups if g.outgroup else cpu_groups).append(g)
    interleaved = chain.from_iterable(zip_longest(cpu_groups, out_groups))
    return [g for g in interleaved if g is not None]
def gen_res_map(solist):
    """Resolve each node's (group, index) mappings into flat result offsets."""
    for obj in solist:
        for key, (group, idx) in obj.group_map.items():
            obj.res_map[key] = group.base + idx
def print_group(g):
    """Print one schedule group: its owning nodes and lowercased events."""
    # keys of events this group actually provides for its objects
    evkeys = [k for o in g.objl for k in o.group_map.keys() if o.group_map[k][0] == g]
    objnames = {("%s" % quote(x[2])) + ("[%d]" % x[1] if x[1] else "") for x in evkeys}
    if len(objnames) == 0:
        return
    evnames = {mark_fixed(x[0]) for x in evkeys}
    pwrap(" ".join(objnames) + ":", 78)
    pwrap(" ".join(evnames).lower() +
          (" [%d counters]" % needed_counters(g.evnum)) +
          (" [%d]" % g.base if args.debug else ""), 75, " ")
def match_patlist(l, s):
    """True if s matches any fnmatch pattern in comma-separated list l."""
    return any(fnmatch(s, pat) for pat in l.split(","))
class Scheduler(object):
    """Schedule events into groups."""
    def __init__(self):
        self.evnum = [] # flat global list
        self.evgroups = [] # of Group
        # outgroup events, keyed by event name
        self.og_groups = {}
        # list of groups that still have generic counters, for faster
        # duplicate checks
        self.evgroups_nf = []
        self.nextgnum = 0
        # event name -> Group that already measures it
        self.event_to_group = {}
    # should avoid adding those in the first place instead
    def dummy_unreferenced(self, olist):
        """Replace group events that no object in olist references with
        "dummy" placeholders so they do not consume real results."""
        refs = defaultdict(set)
        for o in olist:
            for g, ind in o.group_map.values():
                refs[g].add(ind)
        for g in self.evgroups:
            ref = refs[g]
            if len(ref) < len(g.evnum):
                for i in range(len(g.evnum)):
                    if i not in ref:
                        debug_print("unreferenced %s [%d] %s" % (g.evnum[i],
                            i,
                            " ".join([o.name for o in g.objl])))
                        g.evnum[i] = "dummy"
    def split_groups(self, obj, evlev):
        """Split obj's events (which do not fit at once) into per-level
        chunks that each fit the available counters."""
        for lev, evl in groupby(sorted(evlev, key=get_level), get_level):
            evlev = list(evl)
            evnum = [raw_event(x[0]) for x in evlev]
            while evlev:
                n = grab_group(evnum)
                self.add(obj, evnum[:n], None)
                evlev = evlev[n:]
                evnum = evnum[n:]
    # may modify evnum
    def add_duplicate(self, evnum, obj):
        """Try to place evnum into existing groups; return True when all
        events were merged (evnum may be shrunk in place)."""
        evset = set(evnum)
        num_gen = num_generic_counters(evset)
        full = set()
        if ((has(obj, 'area') and match_patlist(DEDUP_AREA, obj.area)) or
                match_patlist(DEDUP_NODE, obj.name)):
            # reuse any previous event independent of group subsets
            # for bottleneck nodes which are too large for the usual
            # heuristics
            duped = []
            for ind, e in enumerate(evnum):
                if ismetric(e) or is_slots(e):
                    continue
                if e in self.event_to_group:
                    g = self.event_to_group[e]
                    debug_print("dedup %s %s to %s" % (obj.name, e, " ".join([x.name for x in g.objl])))
                    g.objl.add(obj)
                    update_group_map(g.evnum, obj, g)
                    duped.append(ind)
            # need to remove in place so that caller sees it
            # remove backwards so that indexes stay valid
            for ind in reversed(duped):
                del evnum[ind]
            if len(evnum) == 0:
                debug_print("%s fully deduped" % obj.name)
                return True
        for g in reversed(self.evgroups_nf if num_gen else self.evgroups):
            if g.outgroup:
                continue
            #
            # In principle we should only merge if there is any overlap,
            # otherwise completely unrelated nodes get merged. But the perf
            # scheduler isn't very good at handling smaller groups, and
            # with eventual exclusive use we would like as big groups as
            # possible. Still keep it as a --tune option to play around.
            if ((any_merge or not evset.isdisjoint(g.evnum)) and
                    needed_counters(cat_unique(g.evnum, evnum)) <= ectx.counters):
                obj_debug_print(obj, "add_duplicate %s in %s obj %s to group %d" % (
                    " ".join(evnum),
                    " ".join(g.evnum),
                    obj.name,
                    g.num))
                for k in evnum:
                    if k not in g.evnum:
                        g.evnum.append(k)
                    if k not in self.event_to_group:
                        self.event_to_group[k] = g
                g.objl.add(obj)
                update_group_map(g.evnum, obj, g)
                return True
            # memorize already full groups
            elif num_generic_counters(set(g.evnum)) >= ectx.counters:
                full.add(g)
        if full:
            self.evgroups_nf = [g for g in self.evgroups_nf if g not in full]
        return False
    def add(self, obj, evnum, evlev):
        """Add obj's events; merge into an existing group or open a new
        one, splitting first when they cannot fit a single group."""
        # does not fit into a group.
        if needed_counters(evnum) > ectx.counters:
            self.split_groups(obj, evlev)
            return
        evnum = dedup(evnum)
        if not self.add_duplicate(evnum, obj):
            g = Group(evnum, [obj], self.nextgnum)
            obj_debug_print(obj, "add %s to group %d" % (evnum, g.num))
            for k in evnum:
                if k not in self.event_to_group:
                    self.event_to_group[k] = g
            self.nextgnum += 1
            self.evgroups.append(g)
            self.evgroups_nf.append(g)
            update_group_map(evnum, obj, g)
    def add_outgroup(self, obj, evnum):
        """Add events measured outside perf groups (one Group per event,
        shared between all objects that use the event)."""
        obj_debug_print(obj, "add_outgroup %s" % evnum)
        for ev in evnum:
            if ev in self.og_groups:
                g = self.og_groups[ev]
                g.objl.add(obj)
            else:
                g = Group([ev], [obj], self.nextgnum, True)
                self.nextgnum += 1
                self.og_groups[ev] = g
                self.evgroups.append(g)
                self.evgroups_nf.append(g)
            update_group_map([ev], obj, g)
    def allocate_bases(self):
        """Assign each group its base offset in the flat event list."""
        base = 0
        for g in self.evgroups:
            g.base = base
            self.evnum += g.evnum
            base += len(g.evnum)
    def print_group_summary(self, olist):
        """Print scheduling statistics (group/event/object counts)."""
        num_groups = len([g for g in self.evgroups if not g.outgroup])
        print("%d cpu groups, %d outgroups with %d events total (%d unique) for %d objects, %d dummies" % (
            num_groups,
            len(self.evgroups) - num_groups,
            len(self.evnum),
            len(set(self.evnum)),
            len(olist),
            self.evnum.count("dummy")),
            file=sys.stderr)
    # fit events into available counters
    def schedule(self, olist):
        """Schedule all objects in olist into counter groups and build
        their result maps."""
        # sort objects by level and inside each level by num-counters
        solist = sorted(olist, key=lambda x: (x.level, x.nc))
        # try to fit each objects events into groups
        # that fit into the available CPU counters
        for obj in solist:
            obj_debug_print(obj, "schedule %s " % obj.name)
            evnum = obj.evnum
            evlevels = obj.evlevels
            oe = [e in ectx.outgroup_events for e in obj.evnum]
            if any(oe):
                # add events outside group separately
                og_evnum = list(compress(obj.evnum, oe))
                self.add_outgroup(obj, og_evnum)
                if all(oe):
                    continue
                # keep other events
                ie = not_list(oe)
                evlevels = list(compress(obj.evlevels, ie))
                evnum = list(compress(obj.evnum, ie))
            self.add(obj, evnum, evlevels)
        if args.no_multiplex or distribute_uncore:
            self.evgroups = do_distribute_uncore(self.evgroups)
        if not KEEP_UNREF:
            self.dummy_unreferenced(olist)
        self.allocate_bases()
        if args.print_group:
            for g in self.evgroups:
                print_group(g)
        gen_res_map(olist)
        if args.print_group:
            self.print_group_summary(olist)
def should_print_obj(obj, match, thresh_mg, bn):
    """Decide whether node/metric obj should be printed.

    match is the user's node-selection predicate; thresh_mg is updated
    in place with metric groups of over-threshold nodes; bn is the
    current bottleneck object.
    NOTE(review): the first condition already covers obj.metric, so the
    later `elif obj.metric:` arm looks unreachable — confirm intent.
    """
    assert not isinstance(obj.val, DummyArith)
    if obj.val is None:
        return False
    if obj.thresh or obj.metric or args.verbose:
        if not match(obj):
            return False
    elif args.only_bottleneck and obj != bn:
        if args.node_metrics and 'group_select' in obj.__dict__ and get_mg(obj) & get_mg(bn):
            return True
        if args.bottlenecks and 'area' in obj.__dict__ and obj.area == "Bottleneck":
            return True
        # XXX handle more explicit options like metrics?
        if 'forced' in obj.__dict__ and obj.forced:
            return True
    elif obj.metric:
        if args.node_metrics and 'group_select' in obj.__dict__ and not (get_mg(obj) & thresh_mg):
            return False
        if args.verbose or (obj.metric and obj.thresh and obj.val != 0.0):
            return True
    elif check_ratio(obj.val): # somewhat redundant
        # remember this node's non-TMA metric groups for metric selection
        thresh_mg |= get_mg(obj) - tma_mgroups
        return True
    return False
def get_uval(ob):
    """Return ob.val wrapped as a UVal carrying the object's name."""
    if isinstance(ob.val, UVal):
        uv = ob.val
    else:
        uv = UVal(ob.name, ob.val)
    uv.name = ob.name
    return uv
# pre compute column lengths
def compute_column_lengths(olist, out):
    """Feed every header/unit to the output backend so it can size its
    columns before anything is printed."""
    for obj in olist:
        out.set_hdr(full_name(obj), obj_area(obj))
        if obj.metric:
            out.set_unit(metric_unit(obj))
        else:
            out.set_unit(node_unit(obj))
            out.set_below(node_below(obj))
class Printer(object):
    """Print measurements while accumulating some metadata."""
    def __init__(self, metricgroups):
        # objects selected for later sampling
        self.sample_obj = set()
        # all bottleneck objects seen so far
        self.bottlenecks = set()
        self.numprint = 0
        self.metricgroups = metricgroups
    def print_res(self, olist, out, timestamp, title, match, bn, idlemark=False):
        """Filter, sort and print all printable objects in olist through
        the out backend. bn is the bottleneck node (marked with <==)."""
        if bn:
            self.bottlenecks.add(bn)
        out.logf.flush()
        # determine all objects to print
        thresh_mg = set() # type: Set[str]
        olist = [o for o in olist if should_print_obj(o, match, thresh_mg, bn)]
        # sort by metric group
        olist = olist_by_metricgroup(olist, self.metricgroups)
        compute_column_lengths(olist, out)
        # step 3: print
        for i, obj in enumerate(olist):
            val = get_uval(obj)
            # clamp to the node's declared maximum, when any
            if has(obj, 'maxval') and obj.maxval is not None and obj.maxval != 0:
                maxval = UVal(obj.name, obj.maxval)
                val = min(val, maxval)
            desc = obj_desc_runtime(obj, olist[i + 1:], bn)
            if obj.metric:
                out.metric(obj_area(obj), obj.name, val, timestamp,
                        desc,
                        title,
                        metric_unit(obj),
                        idlemark)
            else:
                out.ratio(obj_area(obj),
                        full_name(obj), val, timestamp,
                        "%" + node_unit(obj),
                        desc,
                        title,
                        sample_desc(obj.sample, self.emap) if has(obj, 'sample') else None,
                        "<==" if obj == bn else "",
                        node_below(obj),
                        idlemark)
            if obj.thresh or args.verbose:
                self.sample_obj.add(obj)
            self.numprint += 1
    def init_emap(self, emap):
        """Set the event map used to describe sample events."""
        self.emap = emap
# check nodes argument for typos
def check_nodes(runner_list, nodesarg):
    """Validate the --nodes argument; exit with a message listing any
    entries that match no known node in any runner."""
    def opt_obj_name(s):
        # strip the +/^/- prefix, a /level suffix and a trailing ^ marker
        if s[:1] in ('+', '^', '-'):
            s = s[1:]
        if "/" in s:
            s = s[:s.index("/")]
        if s.endswith("^"):
            s = s[:-1]
        return s
    if nodesarg[:1] == "!":
        nodesarg = nodesarg[1:]
    options = [opt_obj_name(s) for s in nodesarg.split(",")]
    def valid_node(s):
        # empty entries (e.g. from a trailing comma) are accepted
        if s == "":
            return True
        for r in runner_list:
            if s in r.odict:
                return True
            for k in r.olist:
                if fnmatch(k.name, s) or fnmatch(full_name(k), s):
                    return True
        return False
    valid = list(map(valid_node, options))
    if not all(valid):
        sys.exit("Unknown node(s) in --nodes: " +
                 " ".join([o for o, v in zip(options, valid) if not v]))
class Runner(object):
    """Handle measurements of event groups. Map events to groups."""
    def reset(self):
        """Reset per-measurement state (stats, node list, scheduler, printer)."""
        self.stat = ComputeStat(args.quiet)
        self.olist = []
        self.idle_keys = set()
        self.sched = Scheduler()
        self.printer = Printer(self.metricgroups)
    def __init__(self, max_level, idle_threshold, kernel_version, pmu=None):
        # always needs to be filtered by olist:
        self.metricgroups = defaultdict(list)
        self.reset()
        # node name -> object
        self.odict = {}
        self.max_level = max_level
        self.max_node_level = 0
        self.idle_threshold = idle_threshold
        self.ectx = EventContext(pmu)
        self.pmu = pmu
        # all nodes ever added, before filtering (for restarts)
        self.full_olist = []
        self.cpu_list = [] # type: List[int]
        self.kernel_version = kernel_version
    def set_ectx(self):
        """Install this runner's event context as the module global."""
        global ectx
        ectx = self.ectx
    def clear_ectx(self):
        # confuses the type checker
        #global ectx
        #ectx = None
        pass
    def do_run(self, obj):
        """Register a node object with this runner."""
        obj.res = None
        obj.res_map = {}
        obj.group_map = {}
        self.olist.append(obj)
        self.full_olist.append(obj)
        self.odict[obj.name] = obj
        if has(obj, 'metricgroup'):
            for j in sorted(obj.metricgroup):
                self.metricgroups[j].append(obj)
        self.max_node_level = max(self.max_node_level, obj.level)
    # remove unwanted nodes after their parent relationship has been set up
    def filter_nodes(self):
        """Filter self.olist down to the nodes the user asked for
        (levels, --nodes, --metric-group, areas, siblings)."""
        add_met, remove_met = parse_metric_group(args.metric_group, self.metricgroups)
        add_obj = {self.odict[x] for x in add_met}
        # fix: flatten the parent lists. This used to be a list of
        # lists, so the `obj in parents` test below could never match
        # parents of explicitly added objects.
        parents = [p for x in add_obj for p in get_parents(x)]
        if add_obj:
            for o in self.olist:
                if safe_ref(o, 'sibling') is None:
                    continue
                m = set(o.sibling) & add_obj
                for s in m:
                    parents.append(s)
                    parents += get_parents(s)
        self.sibmatch = set() # type: Set[Any]
        mgroups = set() # type: Set[str]
        def want_node(obj, mgroups, tma_mgroups):
            # decide if obj is selected; also updates sibmatch/mgroups
            area = safe_ref(obj, 'area')
            if args.no_uncore and area == "Info.System":
                return False
            if args.areas and area and any([fnmatch(area, p) for p in args.areas.split(",")]):
                return True
            if args.bottlenecks and area == "Info.Bottleneck":
                return True
            want = ((obj.metric and args.metrics) or
                    (('force_metric' in obj.__dict__) and obj.force_metric) or
                    obj.name in add_met or
                    obj in parents) and obj.name not in remove_met
            if not obj.metric and obj.level <= self.max_level:
                want = True
            want = node_filter(obj, want, self.sibmatch, mgroups)
            mg = get_mg(obj)
            tma_mgroups |= set([x for x in mg if x.startswith("Tma")])
            if args.node_metrics and want and not obj.metric:
                mgroups |= set(mg) - tma_mgroups
            return want
        for x in self.olist:
            if 'forced' in x.__dict__:
                x.forced = False
        # this updates sibmatch
        fmatch = [want_node(x, mgroups, tma_mgroups) for x in self.olist]
        def select_node(obj):
            if obj in self.sibmatch:
                return True
            if get_mg(obj) & mgroups:
                obj.group_select = True
                return True
            return False
        # now keep what is both in fmatch and sibmatch and mgroups
        # assume that mgroups matches do not need propagation
        self.olist = [obj for obj, fil in zip(self.olist, fmatch) if fil or select_node(obj)]
    def setup_children(self):
        """Link every non-metric node into its parent's children list."""
        for obj in self.olist:
            if not obj.metric and 'parent' in obj.__dict__ and obj.parent:
                obj.parent.children.append(obj)
    def reset_thresh(self):
        """Mark all thresholds as undefined before a recompute."""
        for obj in self.olist:
            obj.thresh = Undef
    def run(self, obj):
        """Register a tree node (non-metric)."""
        obj.thresh = False
        obj.metric = False
        obj.children = []
        self.do_run(obj)
    def metric(self, obj):
        """Register a metric node."""
        obj.thresh = Undef
        obj.metric = True
        obj.level = 0
        obj.sibling = None
        self.do_run(obj)
    def force_metric(self, obj):
        """Register a metric that is always selected."""
        obj.force_metric = True
        self.metric(obj)
    # collect the events by pre-computing the equation
    def collect(self):
        """Collect every node's events and drop nodes whose events are
        unsupported, from a missing PMU, or disabled by errata."""
        self.set_ectx()
        bad_nodes = set()
        bad_events = set()
        unsup_nodes = set()
        errata_nodes = set() # type: Set[Any]
        errata_warn_nodes = set() # type: Set[Any]
        errata_names = set() # type: Set[str]
        errata_warn_names = set() # type: Set[str]
        min_kernel = [] # type: List[int]
        for obj in self.olist:
            obj.evlevels = []
            obj.compute(lambda ev, level: ev_collect(ev, level, obj))
            obj.val = None
            obj.evlist = [x[0] for x in obj.evlevels]
            obj.evnum = raw_events(obj.evlist, initialize=True)
            obj.nc = needed_counters(dedup(obj.evnum))
            # work arounds for lots of different problems
            unsup = [x for x in obj.evlist if unsup_event(x, unsup_events, self.kernel_version, min_kernel)]
            if any(unsup):
                bad_nodes.add(obj)
                bad_events |= set(unsup)
            unsup = [x for x in obj.evlist if missing_pmu(x)]
            if any(unsup):
                unsup_nodes.add(obj)
            query_errata(obj, ectx.errata_events, errata_nodes, errata_names)
            query_errata(obj, ectx.errata_warn_events, errata_warn_nodes, errata_warn_names)
        if bad_nodes:
            if args.force_events:
                pwrap_not_quiet("warning: Using --force-events. Nodes: " +
                        " ".join([x.name for x in bad_nodes]) + " may be unreliable")
            else:
                if not args.quiet:
                    pwrap("warning: removing " +
                          " ".join([x.name for x in bad_nodes]) +
                          " due to unsupported events in kernel: " +
                          " ".join(sorted(bad_events)), 80, "")
                    if min_kernel:
                        print("Fixed in kernel %d" % (sorted(min_kernel, key=kv_to_key, reverse=True)[0]),
                              file=sys.stderr)
                    print("Use --force-events to override (may result in wrong measurements)",
                          file=sys.stderr)
                self.olist = [x for x in self.olist if x not in bad_nodes]
        if unsup_nodes:
            pwrap_not_quiet("Nodes " + " ".join(x.name for x in unsup_nodes) + " has unsupported PMUs")
            self.olist = [x for x in self.olist if x not in unsup_nodes]
        if errata_nodes and not args.ignore_errata:
            pwrap_not_quiet("Nodes " + " ".join(x.name for x in errata_nodes) + " have errata " +
                    " ".join(errata_names) + " and were disabled. " +
                    "Override with --ignore-errata")
            # fix: drop the errata nodes. The filter was inverted and
            # kept ONLY the disabled nodes, contradicting the message
            # just printed.
            self.olist = [x for x in self.olist if x not in errata_nodes]
        if errata_warn_nodes and not args.ignore_errata:
            pwrap_not_quiet("Nodes " + " ".join(x.name for x in errata_warn_nodes) + " have errata " +
                    " ".join(errata_warn_names))
        self.clear_ectx()
    def propagate_siblings(self):
        """Propagate threshold crossings to sibling nodes.
        Returns the number of nodes newly marked."""
        changed = [0]
        def propagate(k, changed, srco):
            if args.debug:
                print("propagate", srco.name, "->", k.name)
            if not k.thresh:
                k.thresh = True
                changed[0] += 1
        for obj in self.olist:
            if obj in self.sibmatch:
                propagate(obj, changed, obj)
            if obj.thresh and obj.sibling:
                if isinstance(obj.sibling, (list, tuple)):
                    for k in obj.sibling:
                        propagate(k, changed, obj)
                else:
                    propagate(obj.sibling, changed, obj)
        return changed[0]
    def compute(self, res, rev, valstats, env, match, stat, runner_list):
        """Evaluate all node equations from the measured results.
        Returns the number of threshold changes (for fixpoint loops)."""
        self.set_ectx()
        changed = 0
        # step 1: compute
        for obj in self.olist:
            obj.errcount = 0
            if not match(obj):
                continue
            ref = set() # type: Set[int]
            oldthresh = obj.thresh
            if 'parent' in obj.__dict__ and obj.parent and obj.parent not in self.olist:
                obj.parent.thresh = True
            obj.compute(lambda e, level:
                        lookup_res(res, rev, e, obj, env, level, ref, -1, valstats, runner_list))
            # compatibility for models that don't set thresh for metrics
            if isinstance(obj.thresh, UVal) and obj.name == "Undef":
                obj.thresh = True
            if args.force_bn and obj.name in args.force_bn:
                obj.thresh = True
            if obj.thresh != oldthresh and oldthresh != Undef:
                changed += 1
            if stat:
                stat.referenced |= ref
            if not obj.res_map and not all([x in env for x in obj.evnum]) and not args.quiet:
                print("%s not measured" % (obj.__class__.__name__,), file=sys.stderr)
            if not obj.metric and not check_ratio(obj.val):
                obj.thresh = False
                if stat:
                    stat.mismeasured.add(obj.name)
            if stat and has(obj, 'errcount') and obj.errcount > 0:
                if obj.name not in stat.errors:
                    stat.errcount += obj.errcount
                stat.errors.add(obj.name)
            stat.referenced |= set(obj.res_map.values())
        # step 2: propagate siblings
        changed += self.propagate_siblings()
        self.clear_ectx()
        return changed
    def list_metric_groups(self):
        """Print the names of all known metric groups."""
        if not args.quiet:
            print("MetricGroups:")
        mg = sorted(self.metricgroups.keys())
        if args.quiet or args.csv:
            pre = ""
        else:
            pre = "\t"
        for j in mg:
            print(pre + j)
    def list_nodes(self, title, filt, rest):
        """Print all nodes passing filt, restricted to the name
        prefixes/exact matches (trailing ^) listed in rest."""
        def match(rest, n, fn):
            return not rest or any([n.startswith(x) or fn.startswith(x) if
                                    not x.endswith("^") else
                                    n == x[:-1] or fn == x[:-1]
                                    for x in rest])
        if title and not args.quiet:
            print("%s:" % title)
        # output separators depend only on args; hoist out of the loop
        if args.csv:
            pre, sep, dsep = "", "\n", ""
        elif args.quiet:
            pre, sep, dsep = "", "\n", "\n"
        else:
            pre, sep, dsep = "\t", "\n", "\n\t"
        for obj in self.olist:
            fn = full_name(obj)
            if filt(obj) and match(rest, obj.name, fn):
                print(fn, end=sep)
                if not args.no_desc:
                    print(pre + obj_desc(obj, sep=dsep))
    def filter_per_core(self, single_thread, rest):
        """Handle core-scope nodes under SMT: either enable perf's
        --percore-show-thread or drop the core-domain nodes."""
        if ("Slots" not in self.ectx.core_domains and
                cpu.ht and
                not single_thread and has_core_node(self)):
            if not feat.supports_percore:
                self.olist = filternot(lambda obj:
                                       safe_ref(obj, 'domain') in self.ectx.core_domains,
                                       self.olist)
            else:
                rest = add_args(rest, "--percore-show-thread")
        return rest
def runner_restart(runner, offset):
    """Reinitialize runner for another measurement round (e.g. drilldown),
    keeping the event map. Assigns the runner's result offset and returns
    the next free offset."""
    emap = runner.printer.emap
    runner.reset()
    runner.printer.emap = emap
    runner.olist = list(runner.full_olist)
    # clear any stale group/result mappings from the previous round
    for o in runner.olist:
        o.group_map = {}
        o.res_map = {}
    runner.filter_nodes()
    runner.collect()
    runner.set_ectx()
    runner.sched.schedule(runner.olist)
    runner.clear_ectx()
    runner.sched.offset = offset
    offset += len(runner.sched.evnum)
    return offset
def runner_init(runner):
    """First-time node setup for a runner: link children, apply the
    user's node filters, then collect the events."""
    runner.setup_children()
    runner.filter_nodes()
    runner.collect()
def supports_pebs():
    """Return True if PEBS (precise sampling) is usable on this system."""
    if feat.has_max_precise:
        return feat.max_precise > 0
    # no direct information available: assume PEBS works unless we are
    # running under a hypervisor
    return not cpu.hypervisor
def remove_pp(s):
    """Strip a trailing :pp precise-sampling qualifier from event s."""
    return s[:-3] if s.endswith(":pp") else s
def clean_event(e):
    """Sanitize event name e into an identifier-safe string:
    drop :pp, map . and : to _, drop =."""
    name = remove_pp(e)
    for old, new in ((".", "_"), (":", "_"), ("=", "")):
        name = name.replace(old, new)
    return name
def do_sample(sample_obj, rest, count, ret, kernel_version):
    """Build (and with --run-sample execute) a perf record command for
    the sample events of the bottleneck objects in sample_obj.

    rest is the remaining command line (workload), count numbers the
    perf.data file in interval mode, ret is the exit status so far.
    """
    samples = [("cycles:pp", "Precise cycles", )]
    for obj in sample_obj:
        for s in obj.sample:
            samples.append((s, obj.name))
    # first dedup
    samples = dedup(samples)
    # now merge objects with the same sample event into one
    def sample_event(x):
        return x[0]
    samples = sorted(samples, key=sample_event)
    samples = [(k, "_".join([x[1] for x in g])) for k, g in groupby(samples, key=sample_event)]
    # find unsupported events
    nsamp = [x for x in samples if not unsup_event(x[0], unsup_events, kernel_version)]
    nsamp = [(remove_pp(x[0]), x[1])
             if unsup_event(x[0], unsup_pebs, kernel_version) else x
             for x in nsamp]
    if nsamp != samples:
        missing = [x[0] for x in set(samples) - set(nsamp)]
        warn("Update kernel to handle sample events:" + "\n".join(missing))
    def force_pebs(ev):
        return ev in ectx.require_pebs_events
    no_pebs = not supports_pebs()
    if no_pebs:
        for j in nsamp:
            # initialize ectx.require_pebs_events
            raw_event(j[0], initialize=True)
        nnopebs = {x[0] for x in nsamp if force_pebs(x[0])}
        if nnopebs and not args.quiet:
            for o in nnopebs:
                # fix: o is already the event-name string; the old
                # "% o[0]" printed only its first character
                warn_no_assert("sample event %s not (currently) supported in virtualization" % o)
        nsamp = [x for x in nsamp if x[0] not in nnopebs]
    sl = [raw_event(s[0], s[1] + "_" + clean_event(s[0]), period=True) for s in nsamp]
    sl = add_filter(sl)
    sample = ",".join([x for x in sl if x])
    if no_pebs:
        # strip precise qualifiers, which need PEBS
        sample = re.sub(r'/p+', '/', sample)
        sample = re.sub(r':p+', '', sample)
    if not args.quiet:
        print("Sampling:")
    extra_args = args.sample_args.replace("+", "-").split()
    perf_data = args.sample_basename
    if count is not None:
        perf_data += ".%d" % count
    sperf = ([feat.perf, "record"] +
             extra_args +
             ["-e", sample, "-o", perf_data] +
             [x for x in rest if x not in ("-A", "--percore-show-thread")])
    cmd = " ".join(sperf)
    if not (args.run_sample and args.quiet):
        print(cmd)
    if args.run_sample and ret == 0:
        ret = os.system(cmd)
        if ret:
            print("Sampling failed")
            sys.exit(1)
        if not args.quiet:
            print("Run `" + feat.perf + " report%s%s' to show the sampling results" % (
                (" -i %s" % perf_data) if perf_data != "perf.data" else "",
                " --no-branch-history" if "-b" in extra_args else ""))
def suggest_bottlenecks(runner):
    """Suggest (or with --drilldown apply) a --nodes expansion covering
    the children and parents of the detected bottlenecks.
    Returns True when drilldown updated args.nodes for another run."""
    def gen_bn(o):
        # expand children of a bottleneck, or its siblings when it is a leaf
        if o.children:
            return "+%s*/%d" % (o.name, o.level + BOTTLENECK_LEVEL_INC)
        if 'sibling' in o.__dict__ and o.sibling:
            return "+%s^" % full_name(o)
        return None
    printer = runner.printer
    children = [gen_bn(o) for o in printer.bottlenecks]
    measured = set([x.name for x in runner.olist])
    children = [x for x in children if x and x not in measured]
    parents = []
    for b in printer.bottlenecks:
        parents += ["+" + full_name(o) for o in get_parents(b) if o.name not in measured]
    if args.nodes:
        # do not re-suggest nodes the user already listed
        children = [x for x in children if x[1:-1] not in args.nodes]
        parents = [x for x in parents if x[1:] not in args.nodes]
    if children:
        mux = ",+MUX" if not (args.metrics or args.all) and (args.nodes is None or "MUX" not in args.nodes) else ""
        if not args.quiet:
            print("Add%s --nodes '!%s%s' for breakdown." % (
                "ing" if args.drilldown else "",
                ",".join(children + parents),
                mux))
        if args.drilldown:
            if runner.pmu != "cpu" and not args.quiet:
                print("Please make sure workload does not move between core types for drilldown", file=sys.stderr)
            if args.nodes:
                args.nodes += ","
            else:
                args.nodes = ""
            if args.nodes == "" or args.nodes[0] != '!':
                args.nodes = "!" + args.nodes
            args.nodes += ",".join(children) + mux
            return True
    return False
def suggest_desc(runner):
    """Print a hint how to get more information on the bottlenecks
    (toplev --describe ... and --run-sample)."""
    def nummatch(n):
        # number of nodes whose name starts with n (ambiguity check)
        return sum([x.name.startswith(n) for x in runner.olist])
    printer = runner.printer
    print("Run toplev --describe %s to get more information on bottleneck%s%s" % (
        " ".join([full_name(x) + "^" if nummatch(x.name) > 1 else x.name + "^" for x in printer.bottlenecks]),
        "s" if len(printer.bottlenecks) > 1 else "",
        (" for " + runner.pmu.replace("cpu_", "")) if runner.pmu and runner.pmu != "cpu" else ""),
        file=sys.stderr)
    if not args.run_sample:
        print_once("Add --run-sample to find locations")
def sysctl(name):
    """Read an integer sysctl value from /proc/sys.

    name uses dotted notation (e.g. kernel.nmi_watchdog). Returns 0
    when the entry cannot be read."""
    path = "/proc/sys/" + name.replace(".", "/")
    try:
        with open(path, "r") as f:
            return int(f.readline())
    except IOError:
        return 0
def update_cpu(args, cpu):
    """Adjust the usable counter budget and sanity-check the CPU model.

    Subtracts one counter per PMU when the NMI watchdog is active,
    applies --reserved-counters, and exits on unsupported CPUs.
    """
    # check nmi watchdog
    # XXX need to get this state from CSV import
    if sysctl("kernel.nmi_watchdog") != 0 or os.getenv("FORCE_NMI_WATCHDOG"):
        # XXX should probe if nmi watchdog runs on fixed or generic counter
        for j in cpu.counters.keys():
            cpu.counters[j] -= 1 # FIXME
        if not args.quiet and not args.import_:
            print("Consider disabling nmi watchdog to minimize multiplexing", file=sys.stderr)
            print("(echo 0 | sudo tee /proc/sys/kernel/nmi_watchdog or\n echo kernel.nmi_watchdog=0 >> /etc/sysctl.conf ; sysctl -p as root)", file=sys.stderr)
    for j in cpu.counters.keys():
        cpu.counters[j] -= args.reserved_counters
    if cpu.cpu is None:
        sys.exit("Unsupported CPU model %s %d" % (cpu.vendor, cpu.model,))
def get_kernel():
    """Return the running kernel version as a comparable key via
    kv_to_key (presumably major*100+minor, matching the 510/670
    comparisons elsewhere — TODO confirm). KERNEL_VERSION overrides."""
    kv = os.getenv("KERNEL_VERSION")
    if not kv:
        kv = platform.release()
    return kv_to_key(list(map(int, kv.split(".")[:2])))
def check_exclusive(args, kernel_version):
    """Validate --exclusive (needs kernel 5.10+) and disable grouping
    strategies that conflict with exclusive mode."""
    if args.exclusive:
        if kernel_version < 510:
            sys.exit("--exclusive needs kernel 5.10+")
        global metrics_own_group
        metrics_own_group = False
        global run_l1_parallel
        run_l1_parallel = False
def ht_warning():
    """Warn on stderr about measurement reliability when HT is enabled."""
    if cpu.ht and not args.quiet:
        print("WARNING: HT enabled", file=sys.stderr)
        # fix: grammar of the warning text ("may is not" -> "is not")
        print("Measuring multiple processes/threads on the same core is not reliable.",
              file=sys.stderr)
def setup_metrics(model, pmu):
    """Configure topdown fixed-metrics / slots support for pmu.

    FORCEMETRICS overrides sysfs probing; --no-group disables the fixed
    topdown metrics entirely."""
    fmenv = os.getenv("FORCEMETRICS")
    ectx.force_metrics = fmenv is not None
    if ectx.force_metrics:
        try:
            # non-zero FORCEMETRICS forces metrics on, zero forces off
            ectx.metrics_override = True if int(fmenv if fmenv else "0") else False
        except ValueError:
            ectx.metrics_override = False
    if ectx.force_metrics:
        model.topdown_use_fixed = ectx.metrics_override
    else:
        model.topdown_use_fixed = os.path.exists("/sys/devices/%s/events/topdown-fe-bound" % pmu)
    if args.no_group:
        model.topdown_use_fixed = False
        ectx.core_domains = ectx.core_domains - set(["Slots"])
    if ectx.force_metrics:
        ectx.slots_available = ectx.metrics_override
    else:
        ectx.slots_available = os.path.exists("/sys/devices/%s/events/slots" % pmu)
def parse_cpu_list(s):
    """Expand a kernel CPU list string such as "0-2,5" into [0, 1, 2, 5].

    Entries that do not start with a number are ignored."""
    cpus = []
    for part in s.split(","):
        m = re.match(r'(\d+)(-\d+)?', part)
        if m is None:
            continue
        start = int(m.group(1))
        if m.group(2):
            end = int(m.group(2)[1:])
            cpus.extend(range(start, end + 1))
        else:
            cpus.append(start)
    return cpus
def read_cpus(base):
    """Return the CPU numbers listed in the sysfs file base/cpus.

    An IOError from open() propagates to the caller.
    (Removed the trailing `return []`, which was unreachable after the
    return inside the with block.)
    """
    with open(base + "/cpus") as cpus:
        return parse_cpu_list(cpus.readline())
def use_cpu(cpu):
    """Return True if CPU number cpu is selected by --core/--cpu
    (everything is selected when neither option is given)."""
    if args.core:
        return display_core(cpu, True)
    if args.cpu:
        return cpu in parse_cpu_list(args.cpu)
    return True
def get_cpu_list(fn):
    """CPUs listed in sysfs dir fn, restricted to the user's selection."""
    return list(filter(use_cpu, read_cpus(fn)))
def init_runner_list(kernel_version):
    """Create one Runner per PMU: multiple runners on real hybrid CPUs
    (honoring --cputype/--cpu/--core), a single cpu_core/cpu_atom runner
    when faking non-hybrid, otherwise one plain "cpu" runner."""
    idle_threshold = init_idle_threshold(args)
    runner_list = []
    hybrid_pmus = []
    hybrid_pmus = glob.glob("/sys/devices/cpu_*")
    if args.force_cpu and args.force_cpu not in hybrid_cpus:
        hybrid_pmus = hybrid_pmus[:1]
    # real hybrid
    if hybrid_pmus and cpu.cpu in hybrid_cpus:
        for j in hybrid_pmus:
            pmuname = os.path.basename(j).replace("cpu_", "")
            if args.cputype and pmuname != args.cputype:
                continue
            cpu_list = get_cpu_list(j)
            if len(cpu_list) == 0:
                continue
            r = Runner(args.level, idle_threshold, kernel_version, pmu=os.path.basename(j))
            runner_list.append(r)
            r.cpu_list = cpu_list
    # hybrid, but faking non hybrid cpu
    elif hybrid_pmus:
        runner_list = [Runner(args.level, idle_threshold, kernel_version,
                              pmu= "cpu_atom" if cpu.cpu in atom_hybrid_cpus else "cpu_core")]
        runner_list[0].cpu_list = get_cpu_list("/sys/devices/cpu_core")
        if len(runner_list[0].cpu_list) == 0:
            sys.exit("cpu_core fallback has no cpus")
    # no hybrid
    else:
        r = Runner(args.level, idle_threshold, kernel_version, pmu="cpu")
        r.cpu_list = []
        runner_list = [r]
    if args.all:
        assert all([ru.max_level <= args.level for ru in runner_list])
    return runner_list
def handle_more_options(args):
    """Apply option side effects: --single-thread disables HT handling,
    --quiet implies --no-desc (unless --desc) and --no-util."""
    if args.single_thread:
        cpu.ht = False
    if args.quiet:
        if not args.desc:
            args.no_desc = True
        args.no_util = True
def tune_model(model):
    """Execute the --tune-model statements against the model module.
    NOTE: exec() on user-supplied strings — only safe for trusted,
    local command lines."""
    if args.tune_model:
        for t in args.tune_model:
            exec(t)
def init_model(model, runner, pe):
    """Initialize a loaded model module for runner and return the
    model's TMA version string.

    Fills in optional model hooks (num_cores, base_frequency, Num_CPUs,
    FP16, HBM_Only, ...) when the model defines them."""
    version = model.version
    model.print_error = pe
    model.check_event = lambda ev: ectx.emap.getevent(ev) is not None
    model.Setup(runner)
    model.num_cores = len(cpu.coreids) # includes sockets
    if cpu.ht:
        model.num_threads = 2
    if "Errata_Whitelist" in model.__dict__:
        ectx.errata_whitelist += model.Errata_Whitelist.split(";")
    if "base_frequency" in model.__dict__:
        model.base_frequency = cpu.freq * 1000
    if "model" in model.__dict__:
        model.model = cpu.modelid
    if "Num_CPUs" in model.__dict__:
        model.Num_CPUs = lambda a, b, c: len(cpu.allcpus)
    if args.fp16:
        if "FP16" in model.__dict__:
            model.FP16 = lambda a, b, c: True
        else:
            sys.exit("--fp16 option but no support in model")
    if args.hbm_only:
        if "HBM_Only" in model.__dict__:
            model.HBM_Only = lambda a, b, c: True
        else:
            sys.exit("--hbm-only option but no support in model")
    tune_model(model)
    return version
def legacy_smt_setup(model):
    """SMT setup for pre-ICL models: --thread selects EBS mode,
    otherwise enable the global SMT handling when HT is on."""
    global smt_mode
    if args.thread:
        model.ebs_mode = cpu.ht
        return
    model.smt_enabled = cpu.ht
    smt_mode |= cpu.ht
def load_default_retlat():
    """Load the default return-latency table for the current CPU family
    from <scriptdir>/<family>-retlat.json into the ret_latency global.

    Exits with a hint to run genretlat when the file is missing."""
    global ret_latency
    if ret_latency is None:
        name = cpu.cpu.split("-")[0]
        fn = os.path.dirname(os.path.realpath(__file__)) + ("/%s-retlat.json" % name)
        try:
            # fix: use a context manager so the file handle is closed
            # deterministically (was json.load(open(fn)))
            with open(fn) as f:
                ret_latency = json.load(f)["Data"]
        except IOError:
            sys.exit("Cannot find default ret latency file %s\n" % fn +
                     "Please generate with representative workload using genretlat -o %s workload" % fn)
def model_setup(runner, cpuname, pe, kernel_version):
    """Import and configure the TMA model matching cpuname (and, on
    hybrid CPUs, runner.pmu). Returns the model version string.

    Also applies per-CPU kernel constraint workarounds.
    NOTE(review): the spr/sprmax branches test cpu.cpu while all others
    test the cpuname parameter — equivalent at the current call site
    (cpu.cpu is passed in), but worth unifying.
    """
    global smt_mode
    if cpuname == "ivb":
        import ivb_client_ratios
        model = ivb_client_ratios
        legacy_smt_setup(model)
    elif cpuname == "ivt":
        import ivb_server_ratios
        model = ivb_server_ratios
        legacy_smt_setup(model)
    elif cpuname == "snb":
        import snb_client_ratios
        model = snb_client_ratios
        legacy_smt_setup(model)
    elif cpuname == "jkt":
        import jkt_server_ratios
        model = jkt_server_ratios
        legacy_smt_setup(model)
    elif cpuname == "hsw":
        import hsw_client_ratios
        model = hsw_client_ratios
        legacy_smt_setup(model)
    elif cpuname == "hsx":
        import hsx_server_ratios
        model = hsx_server_ratios
        legacy_smt_setup(model)
    elif cpuname == "bdw":
        import bdw_client_ratios
        model = bdw_client_ratios
        legacy_smt_setup(model)
    elif cpuname == "bdx":
        import bdx_server_ratios
        model = bdx_server_ratios
        legacy_smt_setup(model)
    elif cpuname == "skl":
        import skl_client_ratios
        model = skl_client_ratios
        legacy_smt_setup(model)
    elif cpuname == "skx":
        import skx_server_ratios
        model = skx_server_ratios
        legacy_smt_setup(model)
    elif cpuname == "clx":
        import clx_server_ratios
        model = clx_server_ratios
        legacy_smt_setup(model)
    elif cpuname == "icx":
        import icx_server_ratios
        icx_server_ratios.smt_enabled = cpu.ht
        model = icx_server_ratios
        setup_metrics(model, runner.pmu)
        # work around kernel constraint table bug in some kernel versions
        if kernel_version < 510:
            ectx.constraint_fixes["CYCLE_ACTIVITY.STALLS_MEM_ANY"] = "0,1,2,3"
        smt_mode = cpu.ht
    elif cpu.cpu == "spr":
        import spr_server_ratios
        spr_server_ratios.smt_enabled = cpu.ht
        model = spr_server_ratios
        setup_metrics(model, runner.pmu)
        smt_mode = cpu.ht
        if kernel_version < 670: # expect to be fixed in 6.7
            # kernel incorrectly schedules ocr on 0-3 only
            ectx.constraint_patterns.append(("OCR.", "0,1,2,3", ))
    elif cpu.cpu == "sprmax":
        import spr_max_server_ratios
        spr_max_server_ratios.smt_enabled = cpu.ht
        model = spr_max_server_ratios
        setup_metrics(model, runner.pmu)
        smt_mode = cpu.ht
        if kernel_version < 670: # expect to be fixed in 6.7
            # kernel incorrectly schedules ocr on 0-3 only
            ectx.constraint_patterns.append(("OCR.", "0,1,2,3", ))
    elif cpuname == "icl":
        import icl_client_ratios
        icl_client_ratios.smt_enabled = cpu.ht
        model = icl_client_ratios
        setup_metrics(model, runner.pmu)
        # work around kernel constraint table bug in some kernel versions
        if kernel_version < 510:
            ectx.constraint_fixes["CYCLE_ACTIVITY.STALLS_MEM_ANY"] = "0,1,2,3"
        smt_mode = cpu.ht
    elif cpuname == "tgl":
        import icl_client_ratios
        icl_client_ratios.smt_enabled = cpu.ht
        model = icl_client_ratios
        setup_metrics(model, runner.pmu)
        if kernel_version < 510:
            ectx.constraint_fixes["CYCLE_ACTIVITY.STALLS_MEM_ANY"] = "0,1,2,3"
        smt_mode = cpu.ht
    elif (cpuname == "adl" and runner.pmu in ("cpu_core", "cpu")) or cpuname == "adl-glc":
        import adl_glc_ratios
        setup_metrics(adl_glc_ratios, runner.pmu)
        adl_glc_ratios.smt_enabled = cpu.ht
        model = adl_glc_ratios
        smt_mode = cpu.ht
        if kernel_version < 670: # expect to be fixed in 6.7
            # kernel incorrectly schedules ocr on 0-3 only
            ectx.constraint_patterns.append(("OCR.", "0,1,2,3", ))
    elif (cpuname == "adl" and runner.pmu == "cpu_atom") or cpuname == "adl-grt":
        import adl_grt_ratios
        model = adl_grt_ratios
        model.use_aux = args.aux
    elif (cpuname == "mtl" and runner.pmu in ("cpu_core", "cpu")) or cpuname == "mtl-rwc":
        import mtl_rwc_ratios
        setup_metrics(mtl_rwc_ratios, runner.pmu)
        mtl_rwc_ratios.smt_enabled = cpu.ht
        model = mtl_rwc_ratios
        ectx.constraint_patterns.append(("OCR.", "0,1,2,3", ))
    elif (cpuname == "mtl" and runner.pmu == "cpu_atom") or cpuname == "mtl-cmt":
        import mtl_cmt_ratios
        model = mtl_cmt_ratios
        model.use_aux = args.aux
    elif cpuname == "slm":
        import slm_ratios
        model = slm_ratios
    elif cpuname == "knl":
        import knl_ratios
        knl_ratios.smt_enabled = smt_mode = cpu.ht
        model = knl_ratios
    elif cpuname == "ehl":
        import ehl_ratios
        model = ehl_ratios
    else:
        if not args.quiet:
            print("Warning: Unknown CPU model number, falling back to simple model")
            ht_warning()
        import simple_ratios
        model = simple_ratios
    return init_model(model, runner, pe)
def runner_emaps(pe, runner_list):
    """Find and install the ocperf event map for every runner, then run
    model setup. Returns the combined version string of all models."""
    version = ""
    for runner in runner_list:
        runner.set_ectx()
        emap = ocperf.find_emap(pmu=runner.pmu if runner.pmu else "cpu")
        if not emap:
            # retry verbosely for a useful error message, then give up
            ocperf.ocverbose = True
            ocperf.find_emap()
            sys.exit("Unknown CPU or CPU event map not found (EVENTMAP:%s, model:%d)" %
                     (os.environ["EVENTMAP"] if "EVENTMAP" in os.environ else "?", cpu.model))
        runner.ectx.init_emap(emap)
        runner.printer.init_emap(emap)
        if version:
            version += ", "
        version += model_setup(runner, cpu.cpu, pe, runner.kernel_version)
        runner.clear_ectx()
    return version
def setup_pe():
    """Return the model error-print callback: a no-op normally, a
    print-each-message-once function with --debug."""
    if not args.debug:
        return lambda x: None
    seen = set()
    def print_err(msg):
        # only report each distinct error once
        if msg not in seen:
            print(msg)
            seen.add(msg)
    return print_err
def handle_misc_options(args, version):
    """Handle --version (print and exit), --gen-script (implies quiet)
    and validate --subset usage."""
    if args.version:
        print("toplev, CPU: %s, TMA version: %s" % (cpu.cpu, version))
        sys.exit(0)
    if args.gen_script:
        args.quiet = True
    if args.subset:
        if not args.import_:
            sys.exit("--subset requires --import mode")
        if args.script_record:
            sys.exit("--subset cannot be used with --script-record. Generate temp file with perf stat report -x\\;")
def handle_cmd(args, runner_list, rest):
    """Handle the --describe / --list-* query commands.

    These commands print information about the model's nodes, metrics and
    metric groups and then exit; `rest` holds node names from the command
    line used as a filter.
    """
    if args.describe:
        # --describe implies the long descriptions
        args.long_desc = True
        if not rest:
            sys.exit("No nodes to describe")
        for r in runner_list:
            r.list_nodes(None, any_node, rest)
    if args.list_metrics or args.list_all:
        for r in runner_list:
            r.list_nodes("Metrics", lambda obj: obj.metric, rest)
    if args.list_nodes or args.list_all:
        for r in runner_list:
            r.list_nodes("Nodes", lambda obj: not obj.metric, rest)
    if args.list_metric_groups or args.list_all:
        for r in runner_list:
            r.list_metric_groups()
    if args.list_metric_groups or args.list_metrics or args.list_nodes or args.list_all or args.describe:
        # leftover dashed arguments indicate a mistyped option
        if any([x.startswith("-") for x in rest]):
            sys.exit("Unknown arguments for --list*/--describe")
        sys.exit(0)
def has_core_node(runner):
    """Return True when any enabled object of this runner has Core (SMT) scope."""
    runner.set_ectx()
    found = any(core_node(obj) for obj in runner.olist)
    runner.clear_ectx()
    return found
def any_core_node(runner_list):
    """Return True when any runner has a node with Core (SMT) scope."""
    # idiomatic any() instead of the manual loop-with-flag
    return any(has_core_node(r) for r in runner_list)
def check_root():
    """Warn when perf will likely need root or a relaxed perf_event_paranoid."""
    # keep the original evaluation order: privilege check runs even in quiet mode
    privileged = os.geteuid() == 0 or sysctl("kernel.perf_event_paranoid") == -1
    if not privileged and not args.quiet:
        print("Warning: Needs root or echo -1 > /proc/sys/kernel/perf_event_paranoid",
                file=sys.stderr)
def extra_setup_once(runner, rest):
    """One-time setup of optional metric providers on the first runner.

    May extend and return the perf argument list `rest` (forcing
    system-wide collection when --power is in use).
    """
    if not args.no_util:
        import perf_metrics
        perf_metrics.Setup(runner)
    if args.power and feat.supports_power:
        import power_metrics
        power_metrics.Setup(runner)
    if args.sw:
        import linux_metrics
        linux_metrics.Setup(runner)
    if args.power and feat.supports_power:
        if not args.quiet and not args.import_ and not args.print:
            print("Running with --power. Will measure complete system.")
        if args.single_thread:
            # warning only; system-wide collection is forced below anyway
            print("--single-thread conflicts with --power")
        check_root()
        rest = add_args(rest, "-a")
    return rest
def extra_setup(runner):
    """Per-runner optional metric setup (TSX and frequency metrics)."""
    want_tsx = (args.tsx and cpu.has_tsx and cpu.cpu in tsx_cpus
                and runner.pmu in ("cpu", "cpu_core"))
    if want_tsx:
        import tsx_metrics
        tsx_metrics.Setup(runner)
    if args.frequency:
        import frequency
        frequency.SetupCPU(runner, cpu)
def runner_extra_init(args, rest, runner_list):
    """Set up extra metrics on all runners and validate --nodes."""
    rest = extra_setup_once(runner_list[0], rest)
    for runner in runner_list:
        extra_setup(runner)
    if args.nodes:
        check_nodes(runner_list, args.nodes)
    for runner in runner_list:
        runner.setup_children()
    return rest
def runner_filter(args, rest, runner_list):
    """Let every runner apply its per-core filtering to the perf options."""
    for runner in runner_list:
        rest = runner.filter_per_core(args.single_thread, rest)
    return rest
def update_smt(args, rest):
    """Add perf aggregation options for the non-SMT measurement case.

    Only applies outside SMT mode / --single-thread / --no-aggr.
    Returns the possibly extended perf argument list.
    """
    if not smt_mode and not args.single_thread and not args.no_aggr:
        hybrid = cpu.cpu in hybrid_cpus
        multi = output_count()
        if multi > 0:
            rest = add_args(rest, "-a")
        # NOTE(review): multiple outputs need per-cpu data; hybrid appears
        # to be handled elsewhere -- confirm
        if (multi > 1 or args.per_thread) and not hybrid:
            args.no_aggr = True
        if args.per_socket and multi == 1 and not hybrid:
            rest = add_args(rest, "--per-socket")
        if args.per_core and multi == 1 and not hybrid:
            rest = add_args(rest, "--per-core")
    return rest
def runner_node_filter(runner_list):
    """Apply node filtering on every runner."""
    for runner in runner_list:
        runner.filter_nodes()
def update_smt_mode(runner_list):
    """Disable SMT mode when no enabled node actually has Core scope.

    FORCEHT in the environment keeps SMT mode on unconditionally.
    """
    if smt_mode and not os.getenv('FORCEHT'):
        return smt_mode if any_core_node(runner_list) else False
    return smt_mode
def check_full_system(args, rest):
    """Decide whether the measurement covers the full system.

    In SMT mode toplev must measure all CPUs (-a) without aggregation (-A)
    so that sibling-thread values can be combined later.
    Returns (full_system, rest).
    """
    full_system = False
    if not args.single_thread and smt_mode:
        if not args.quiet and not args.import_:
            print("Will measure complete system.")
    if smt_mode:
        if args.cpu:
            print("Warning: --cpu/-C mode with HyperThread must specify all core thread pairs!",
                file=sys.stderr)
        if args.pid:
            sys.exit("-p/--pid mode not compatible with SMT. Use sleep in global mode.")
        check_root()
        rest = add_args(rest, "-a")
        args.no_aggr = True
        full_system = True
    else:
        # perf already reports per-location values in these modes
        full_system = args.no_aggr or "--per-core" in rest or "--per-socket" in rest
    if args.no_aggr:
        rest = add_args(rest, "-A")
    return full_system, rest
# true when the perf CSV output carries a Num-CPUs column
output_numcpus = False
def init_perf_output(args, rest, full_system):
    """Write the CSV header line for --perf-output/--perf-summary files."""
    if (args.perf_output or args.perf_summary) and not args.no_csv_header:
        ph = []
        if args.interval:
            ph.append("Timestamp")
        if full_system:
            ph.append("Location")
        if ("--per-socket" in rest or "--per-core" in rest) and not args.no_aggr:
            # aggregated per-socket/per-core output has an extra column
            ph.append("Num-CPUs")
            global output_numcpus
            output_numcpus = True
        ph += ["Value", "Unit", "Event", "Run-Time", "Enabled", "", ""]
        if args.perf_output:
            args.perf_output.write(";".join(ph) + "\n")
        if args.perf_summary:
            args.perf_summary.write(";".join(ph) + "\n")
def setup_cpus(args, rest, cpu, runner_list):
    """Assign the set of CPUs to measure to each runner.

    Honors --cpu/--core filters and, for hybrid systems with multiple
    runners in --no-aggr mode, partitions the cores evenly between the
    runners. Returns the possibly extended perf argument list.
    """
    if args.cpu:
        allcpus = parse_cpu_list(args.cpu)
    else:
        allcpus = cpu.allcpus
    if args.core:
        allowed_threads = [x for x in allcpus if display_core(x, False)]
        allowed_cores = [x for x in allcpus if display_core(x, True)]
        rest = ["-C", ",".join(["%d" % x for x in allowed_cores])] + rest
    else:
        allowed_threads = allcpus
    if len(runner_list) > 1 and args.no_aggr and not runner_list[0].cpu_list: # XXX
        # no explicit cpu lists yet: split the cores evenly between runners
        cores = list(sorted(cpu.coreids.keys()))
        part = len(cores)//len(runner_list)
        start = 0
        for r in runner_list:
            r.cpu_list = sorted(flatten([cpu.coreids[x] for x in cores[start:start+part]]))
            start += part
    else:
        for r in runner_list:
            if r.cpu_list:
                # restrict an existing list to the allowed threads
                r.cpu_list = sorted(list(set(r.cpu_list) & set(allowed_threads)))
            else:
                r.cpu_list = list(allowed_threads)
    return rest
def init_output(args, version):
    """Pick and construct the Output implementation matching the requested format."""
    if args.json:
        if args.csv:
            sys.exit("Cannot combine --csv with --json")
        if args.columns:
            sys.exit("Cannot combine --columns with --json")
        return tl_output.OutputJSON(args.output, args.csv, args, version, cpu) # type: tl_output.Output
    if args.csv and args.columns:
        return tl_output.OutputColumnsCSV(args.output, args.csv, args, version, cpu)
    if args.csv:
        return tl_output.OutputCSV(args.output, args.csv, args, version, cpu)
    if args.columns:
        return tl_output.OutputColumns(args.output, args, version, cpu)
    return tl_output.OutputHuman(args.output, args, version, cpu)
def init_valcsv(out, args):
    """Attach a raw-value CSV writer to `out` when --valcsv was given."""
    if not args.valcsv:
        return
    out.valcsv = csv.writer(args.valcsv, lineterminator='\n', delimiter=';')
    if args.no_csv_header:
        return
    out.valcsv.writerow(("Timestamp", "CPU", "Group", "Event", "Value",
                         "Perf-event", "Index", "STDDEV", "MULTI", "Nodes"))
# XXX use runner_restart
def runner_first_init(args, runner_list):
    """First-time node initialization and event scheduling for all runners.

    Exits when no nodes are enabled. Each runner's scheduler gets an
    offset into the combined event number list.
    """
    nnodes = 0
    for r in runner_list:
        runner_init(r)
        nnodes += len(r.olist)
    if nnodes == 0:
        sys.exit("No nodes enabled")
    if args.nodes:
        check_nodes(runner_list, args.nodes)
    offset = 0
    for r in runner_list:
        r.set_ectx()
        r.sched.schedule(r.olist)
        r.sched.offset = offset
        offset += len(r.sched.evnum)
        r.clear_ectx()
def suggest(runner):
    """Print bottleneck hints and report whether a drilldown rerun is wanted."""
    bottlenecks = runner.printer.bottlenecks
    if bottlenecks and not args.quiet:
        suggest_desc(runner)
    if bottlenecks and args.level < runner.max_node_level:
        return suggest_bottlenecks(runner)
    return False
def measure_and_sample(runner_list, count, out, orig_smt_mode, rest, full_system):
    """Run the measurement, print results and optionally drill down/sample.

    Loops while the bottleneck analysis suggests measuring more nodes
    (--drilldown), restarting the runners with the new node set each time.
    `count` numbers the sampling iterations (None when not sampling).
    Returns (exit code, updated count, updated full_system).
    """
    rrest = rest
    while True:
        summary = Summary()
        try:
            if args.no_multiplex and not args.import_:
                ret = execute_no_multiplex(runner_list, out, rrest, summary)
            else:
                ret = execute(runner_list, out, rrest, summary)
        except KeyboardInterrupt:
            ret = 1
        print_summary(summary, out, runner_list, full_system)
        repeat = False
        for runner in runner_list:
            runner.stat.compute_errors()
            repeat |= suggest(runner)
        if (args.show_sample or args.run_sample) and ret == 0:
            for runner in runner_list:
                runner.set_ectx()
                do_sample(runner.printer.sample_obj, rest, count, ret, runner.kernel_version)
                runner.clear_ectx()
        # NOTE(review): exit codes 100..200 are treated as perf/workload
        # failure -- confirm against the execute() return convention
        if 100 <= ret <= 200 and repeat:
            print("Perf or workload appears to have failed with error %d. Not drilling down" % ret,
                  file=sys.stderr)
            break
        if count is not None:
            count += 1
        if repeat:
            if not args.quiet:
                print("Rerunning workload", file=sys.stderr)
            offset = 0
            nnodes = 0
            for r in runner_list:
                offset = runner_restart(r, offset)
                nnodes += len(r.olist)
            global smt_mode
            smt_mode = orig_smt_mode
            # recompute SMT mode for the new node set
            if smt_mode and not os.getenv('FORCEHT'):
                if not any_core_node(runner_list):
                    smt_mode = False
            # XXX do all checks for incompatible arguments like top level
            if smt_mode and not args.single_thread:
                check_root()
                rrest = add_args(rrest, "-a")
                rrest = add_args(rrest, "-A")
                full_system = True
            if nnodes == 0:
                sys.exit("No nodes enabled")
        else:
            break
    return ret, count, full_system
def report_idle(runner_list):
    """Tell the user which CPUs were hidden by the idle threshold."""
    idle = set()
    for runner in runner_list:
        idle.update(runner.idle_keys)
    if idle and not args.quiet:
        print("Idle CPUs %s may have been hidden. Override with --idle-threshold 100" %
                idle_range_list(idle), file=sys.stderr)
def report_not_supported(runner_list):
    """Suggest running event_download when some events could not be resolved."""
    notfound_caches = {}
    for r in runner_list:
        notfound_caches.update(r.ectx.notfound_cache)
    # only genuinely unknown events (as opposed to known-but-unsupported on
    # this CPU) warrant updating the event lists; generator instead of the
    # original needless list materialization inside any()
    if notfound_caches and any("not supported" not in x for x in notfound_caches.values()) and not args.quiet:
        print("Some events not found. Consider running event_download to update event lists", file=sys.stderr)
def measure(out, orig_smt_mode, rest, runner_list, full_system):
    """Run measure_and_sample once, or repeatedly for --sample-repeat.

    Returns the exit code of the (last) measurement.
    """
    if args.sample_repeat:
        cnt = 1
        for j in range(args.sample_repeat):
            ret, cnt, full_system = measure_and_sample(runner_list, cnt, out, orig_smt_mode,
                    rest, full_system)
            if ret:
                break
    else:
        # count is only advanced in --drilldown mode; the returned count is
        # unused on this path
        ret, count, full_system = measure_and_sample(runner_list, 0 if args.drilldown else None, out,
                orig_smt_mode, rest, full_system)
    return ret
def idle_range_list(l):
    """Render a collection of CPU identifiers as a compact range string.

    Numeric identifiers are sorted and collapsed into ranges, e.g.
    ["1","2","3","7"] -> "1-3,7". Non-numeric identifiers are joined
    unchanged in their given order.
    """
    if all(x.isdigit() for x in l):
        # adapted from https://stackoverflow.com/questions/2154249/identify-groups-of-continuous-numbers-in-a-list
        def get_range(g):
            group = [x[1] for x in g]
            if len(group) == 1:
                return "%d" % group[0]
            return "%d-%d" % (group[0], group[-1])
        # consecutive numbers share the same index-minus-value key
        l = [get_range(g) for k, g in groupby(enumerate(sorted(int(x) for x in l)),
                                              lambda x: x[0] - x[1])]
    return ",".join(l)
def finish_graph(graphp):
    """In --graph mode close the output pipe and wait for the grapher process."""
    if not args.graph:
        return
    args.output.close()
    graphp.wait()
def main(args, rest, feat, env, cpu):
    """Top-level driver: wire up arguments, runners and output, then measure.

    Exits the process with the measurement's exit code.
    """
    pversion = ocperf.PerfVersion()
    handle_parallel(args, env)
    rest = handle_rest(args, rest)
    open_output_files(args)
    update_args2(args)
    graphp = handle_graph(args)
    args.ring_filter = init_ring_filter(args)
    update_args_cpu(args, pversion)
    update_cpu(args, cpu)
    kernel_version = get_kernel()
    check_exclusive(args, kernel_version)
    runner_list = init_runner_list(kernel_version)
    global KEEP_UNREF
    if len(runner_list) > 1 and isinstance(KEEP_UNREF, bool):
        KEEP_UNREF = True # for now -- dummy can get assigned to wrong runner
    global INAME
    global FUZZYINPUT
    if len(runner_list) > 1 and (INAME or FUZZYINPUT):
        sys.exit("INAME and FUZZYINPUT do not support hybrid")
    handle_more_options(args)
    version = runner_emaps(setup_pe(), runner_list)
    handle_misc_options(args, version)
    handle_cmd(args, runner_list, rest)
    rest = runner_extra_init(args, rest, runner_list)
    rest = runner_filter(args, rest, runner_list)
    rest = update_smt(args, rest)
    runner_node_filter(runner_list)
    global smt_mode
    orig_smt_mode = smt_mode
    smt_mode = update_smt_mode(runner_list)
    full_system, rest = check_full_system(args, rest)
    init_perf_output(args, rest, full_system)
    rest = setup_cpus(args, rest, cpu, runner_list)
    if args.pinned:
        # NOTE(review): this assigns a *local*; likely intended to set the
        # module-level run_l1_parallel -- confirm
        run_l1_parallel = True
    out = init_output(args, version)
    init_valcsv(out, args)
    runner_first_init(args, runner_list)
    if args.repl:
        # interactive inspection of the fully initialized state
        import code
        code.interact(banner='toplev repl', local=locals())
        sys.exit(0)
    ret = measure(out, orig_smt_mode, rest, runner_list, full_system)
    out.print_footer()
    out.flushfiles()
    if args.xlsx and ret == 0:
        ret = do_xlsx(env, args)
    report_idle(runner_list)
    report_not_supported(runner_list)
    finish_graph(graphp)
    sys.exit(ret)
if __name__ == '__main__':
    # these are top level to avoid globals, which break the type checker
    # alternative would be to pass them everywhere, but that would be tedious
    args, rest_ = init_args()
    feat = PerfFeatures(args)
    ectx = EventContextBase() # only for type checker
    # allow tune to override toplevel without global
    if args.tune:
        for t in args.tune:
            # SECURITY NOTE: --tune executes arbitrary Python supplied on
            # the command line; acceptable only because the invoking user
            # already controls this process
            exec(t)
    env_ = tl_cpu.Env()
    update_args(args, env_)
    # XXX move into ectx
    cpu = tl_cpu.CPU(known_cpus, nocheck=event_nocheck(), env=env_)
    main(args, rest_, feat, env_, cpu)
# -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel 14th gen Core (code name Meteor Lake) with Redwood Cove
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#
# Helpers
# NOTE: this file is auto-generated from the TMA metrics description;
# toplev overwrites several of the globals below at runtime (e.g.
# smt_enabled, num_cores, print_error) -- do not hand-edit the formulas.
print_error = lambda msg: False
smt_enabled = False
ebs_mode = False
version = "4.8-full-perf"
base_frequency = -1.0
Memory = 0
Average_Frequency = 0.0
num_cores = 1
num_threads = 1
num_sockets = 1
topdown_use_fixed = False
def handle_error(obj, msg):
    """Record a compute error on a node object and zero out its value."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
    obj.thresh = False
def handle_error_metric(obj, msg):
    """Record a compute error on a metric object (metrics have no threshold)."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
# Constants
Exe_Ports = 12
Mem_L2_Store_Cost = 10
Mem_STLB_Hit_Cost = 7
MS_Switches_Cost = 3
Avg_Assist_Cost = ( 99 *3 + 63 + 30 ) / 5
Pipeline_Width = 6
OneMillion = 1000000
OneBillion = 1000000000
Energy_Unit = 61
PERF_METRICS_MSR = 1
DS = 0
# Aux. formulas
# NOTE(review): auto-generated TMA helper formulas. EV(name, level) is a
# callback that resolves a perf event (or a nested lambda expression) to
# its measured value at the given TMA level; `self` is the model object
# holding the computed tree nodes.
def Br_DoI_Jumps(self, EV, level):
    return EV("BR_INST_RETIRED.NEAR_TAKEN", level) - EV("BR_INST_RETIRED.COND_TAKEN", level) - 2 * EV("BR_INST_RETIRED.NEAR_CALL", level)
def Branching_Retired(self, EV, level):
    return (EV("BR_INST_RETIRED.ALL_BRANCHES", level) + 2 * EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("INST_RETIRED.NOP", level)) / SLOTS(self, EV, level)
def Serialize_Core(self, EV, level):
    return self.Core_Bound.compute(EV) * (self.Serializing_Operation.compute(EV) + EV("RS.EMPTY:u1", level) / CLKS(self, EV, level) * self.Ports_Utilized_0.compute(EV)) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))
def Umisp(self, EV, level):
    return 10 * self.Microcode_Sequencer.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV)
def Assist(self, EV, level):
    return (self.Microcode_Sequencer.compute(EV) / (self.Microcode_Sequencer.compute(EV) + self.Few_Uops_Instructions.compute(EV))) * (self.Assists.compute(EV) / self.Microcode_Sequencer.compute(EV))
def Assist_Frontend(self, EV, level):
    return (1 - EV("INST_RETIRED.REP_ITERATION", level) / EV("UOPS_RETIRED.MS:c1", level)) * (self.Fetch_Latency.compute(EV) * (self.MS_Switches.compute(EV) + self.Branch_Resteers.compute(EV) * (self.Clears_Resteers.compute(EV) + self.Mispredicts_Resteers.compute(EV) * self.Other_Mispredicts.compute(EV) / self.Branch_Mispredicts.compute(EV)) / (self.Clears_Resteers.compute(EV) + self.Unknown_Branches.compute(EV) + self.Mispredicts_Resteers.compute(EV))) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
def Assist_Retired(self, EV, level):
    return Assist(self, EV, level) * self.Heavy_Operations.compute(EV)
def Core_Bound_Cycles(self, EV, level):
    return self.Ports_Utilized_0.compute(EV) * CLKS(self, EV, level) + Few_Uops_Executed_Threshold(self, EV, level)
def DurationTimeInSeconds(self, EV, level):
    return EV("interval-ms", 0) / 1000
def Execute_Cycles(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE_CYCLES_GE_1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.THREAD:c1", level)
# factor used for metrics associating fixed costs for FB Hits - according to probability theory if all FB Hits come at a random rate in original L1_Miss cost interval then the average cost for each one is 0.5 of the fixed cost
def FB_Factor(self, EV, level):
    return 1 + FBHit_per_L1Miss(self, EV, level) / 2
def FBHit_per_L1Miss(self, EV, level):
    return EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("MEM_LOAD_RETIRED.L1_MISS", level)
def Fetched_Uops(self, EV, level):
    return EV("UOPS_ISSUED.ANY", level)
def Few_Uops_Executed_Threshold(self, EV, level):
    return EV("EXE_ACTIVITY.1_PORTS_UTIL", level) + self.Retiring.compute(EV) * EV("EXE_ACTIVITY.2_PORTS_UTIL:u0xc", level)
# Floating Point computational (arithmetic) Operations Count
def FLOP_Count(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.SCALAR", level) + 2 * EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + 4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) + 8 * EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)
# Floating Point computational (arithmetic) Operations Count
def FP_Arith_Scalar(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.SCALAR", level)
# Floating Point computational (arithmetic) Operations Count
def FP_Arith_Vector(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.VECTOR", level)
def HighIPC(self, EV, level):
    val = IPC(self, EV, level) / Pipeline_Width
    return val
def Light_Ops_Sum(self, EV, level):
    return self.FP_Arith.compute(EV) + self.Int_Operations.compute(EV) + self.Memory_Operations.compute(EV) + self.Fused_Instructions.compute(EV) + self.Non_Fused_Branches.compute(EV)
def MEM_Bound_Ratio(self, EV, level):
    return EV("MEMORY_ACTIVITY.STALLS_L3_MISS", level) / CLKS(self, EV, level)
def Mem_Lock_St_Fraction(self, EV, level):
    return EV("MEM_INST_RETIRED.LOCK_LOADS", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)
def Mispred_Clears_Fraction(self, EV, level):
    return self.Branch_Mispredicts.compute(EV) / self.Bad_Speculation.compute(EV)
# the ORO_* helpers pass a lambda to EV so the min() is evaluated per
# measurement interval rather than on aggregated totals
def ORO_Demand_RFO_C1(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level )
def ORO_DRD_Any_Cycles(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level )
def ORO_DRD_BW_Cycles(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.DATA_RD:c4", level)) , level )
def Store_L2_Hit_Cycles(self, EV, level):
    return EV("MEM_STORE_RETIRED.L2_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level))
def True_XSNP_HitM_Fraction(self, EV, level):
    return EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", level) / (EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", level) + EV("OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", level))
# Mem_*_Cost: fixed cycle costs scaled by the core frequency ratio
def Mem_XSNP_HitM_Cost(self, EV, level):
    return 28 * Core_Frequency(self, EV, level)
def Mem_XSNP_Hit_Cost(self, EV, level):
    return 27 * Core_Frequency(self, EV, level)
def Mem_XSNP_None_Cost(self, EV, level):
    return 12 * Core_Frequency(self, EV, level)
def Mem_L2_Hit_Cost(self, EV, level):
    return 3 * Core_Frequency(self, EV, level)
def PERF_METRICS_SUM(self, EV, level):
    return (EV("PERF_METRICS.FRONTEND_BOUND", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.BAD_SPECULATION", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.RETIRING", level) / EV("TOPDOWN.SLOTS", level)) + (EV("PERF_METRICS.BACKEND_BOUND", level) / EV("TOPDOWN.SLOTS", level))
def Retire_Fraction(self, EV, level):
    return EV("UOPS_RETIRED.SLOTS", level) / EV("UOPS_ISSUED.ANY", level)
# Retired slots per Logical Processor
def Retired_Slots(self, EV, level):
    return self.Retiring.compute(EV) * SLOTS(self, EV, level)
# Number of logical processors (enabled or online) on the target system
def Num_CPUs(self, EV, level):
    return num_cores * num_threads if num_cores else(8 + 16 /(2 - smt_enabled))
# A system parameter for dependent-loads (pointer chasing like access pattern) of the workload. An integer fraction in range from 0 (no dependent loads) to 100 (all loads are dependent loads)
def Dependent_Loads_Weight(self, EV, level):
    return 20
# Info.Bottleneck metrics: each returns a percentage of total pipeline
# cost and sets self.thresh when the value crosses the reporting
# threshold. Auto-generated -- keep formulas in sync with the TMA spec.
# Total pipeline cost of Branch Misprediction related bottlenecks
def Mispredictions(self, EV, level):
    val = 100 *(1 - Umisp(self, EV, level)) * (self.Branch_Mispredicts.compute(EV) + self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)
def Big_Code(self, EV, level):
    val = 100 * self.Fetch_Latency.compute(EV) * (self.ITLB_Misses.compute(EV) + self.ICache_Misses.compute(EV) + self.Unknown_Branches.compute(EV)) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)
def Instruction_Fetch_BW(self, EV, level):
    val = 100 *(self.Frontend_Bound.compute(EV) - (1 - Umisp(self, EV, level)) * self.Fetch_Latency.compute(EV) * self.Mispredicts_Resteers.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) - Assist_Frontend(self, EV, level)) - Big_Code(self, EV, level)
    self.thresh = (val > 20)
    return val
# Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks
def Cache_Memory_Bandwidth(self, EV, level):
    val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Bandwidth.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.SQ_Full.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.FB_Full.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of external Memory- or Cache-Latency related bottlenecks
def Cache_Memory_Latency(self, EV, level):
    val = 100 *((self.Memory_Bound.compute(EV) * (self.DRAM_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.MEM_Latency.compute(EV) / (self.MEM_Latency.compute(EV) + self.MEM_Bandwidth.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.L3_Hit_Latency.compute(EV) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * self.L2_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Store_Latency.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))) + (self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.L1_Hit_Latency.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)
def Memory_Data_TLBs(self, EV, level):
    val = 100 *(self.Memory_Bound.compute(EV) * (self.L1_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.DTLB_Load.compute(EV) / (self.Store_Fwd_Blk.compute(EV) + self.L1_Hit_Latency.compute(EV) + self.DTLB_Load.compute(EV) + self.Lock_Latency.compute(EV) + self.Split_Loads.compute(EV) + self.FB_Full.compute(EV))) + (self.Memory_Bound.compute(EV) * (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.DTLB_Store.compute(EV) / (self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)
def Memory_Synchronization(self, EV, level):
    val = 100 *(self.Memory_Bound.compute(EV) * ((self.L3_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * (self.Contested_Accesses.compute(EV) + self.Data_Sharing.compute(EV)) / (self.L3_Hit_Latency.compute(EV) + self.Contested_Accesses.compute(EV) + self.SQ_Full.compute(EV) + self.Data_Sharing.compute(EV)) + (self.Store_Bound.compute(EV) / (self.L1_Bound.compute(EV) + self.L3_Bound.compute(EV) + self.DRAM_Bound.compute(EV) + self.Store_Bound.compute(EV) + self.L2_Bound.compute(EV))) * self.False_Sharing.compute(EV) / ((self.Split_Stores.compute(EV) + self.DTLB_Store.compute(EV) + self.Streaming_Stores.compute(EV) + self.Store_Latency.compute(EV) + self.False_Sharing.compute(EV)) - self.Store_Latency.compute(EV))) + self.Machine_Clears.compute(EV) * (1 - self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))))
    self.thresh = (val > 10)
    return val
# Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy.
def Compute_Bound_Est(self, EV, level):
    val = 100 *((self.Core_Bound.compute(EV) * self.Divider.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) + (self.Core_Bound.compute(EV) * (self.Ports_Utilization.compute(EV) / (self.Serializing_Operation.compute(EV) + self.Ports_Utilization.compute(EV) + self.Divider.compute(EV))) * (self.Ports_Utilized_3m.compute(EV) / (self.Ports_Utilized_0.compute(EV) + self.Ports_Utilized_1.compute(EV) + self.Ports_Utilized_2.compute(EV) + self.Ports_Utilized_3m.compute(EV)))))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments)
def Irregular_Overhead(self, EV, level):
    val = 100 *(Assist_Frontend(self, EV, level) + Umisp(self, EV, level) * self.Branch_Mispredicts.compute(EV) + (self.Machine_Clears.compute(EV) * self.Other_Nukes.compute(EV) / (self.Other_Nukes.compute(EV))) + Serialize_Core(self, EV, level) + Assist_Retired(self, EV, level))
    self.thresh = (val > 10)
    return val
# Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.
def Other_Bottlenecks(self, EV, level):
    # defined as the residual after all named bottleneck categories
    val = 100 -(Big_Code(self, EV, level) + Instruction_Fetch_BW(self, EV, level) + Mispredictions(self, EV, level) + Cache_Memory_Bandwidth(self, EV, level) + Cache_Memory_Latency(self, EV, level) + Memory_Data_TLBs(self, EV, level) + Memory_Synchronization(self, EV, level) + Compute_Bound_Est(self, EV, level) + Irregular_Overhead(self, EV, level) + Branching_Overhead(self, EV, level) + Useful_Work(self, EV, level))
    self.thresh = (val > 20)
    return val
# Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound). Consider Loop Unrolling or function inlining optimizations
def Branching_Overhead(self, EV, level):
    val = 100 * Branching_Retired(self, EV, level)
    self.thresh = (val > 5)
    return val
# Total pipeline cost of "useful operations" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.
def Useful_Work(self, EV, level):
    val = 100 *(self.Retiring.compute(EV) - Branching_Retired(self, EV, level) - Assist_Retired(self, EV, level))
    self.thresh = (val > 20)
    return val
# Probability of Core Bound bottleneck hidden by SMT-profiling artifacts. Tip: consider analysis with SMT disabled
def Core_Bound_Likely(self, EV, level):
    val = 100 *(1 - self.Core_Bound.compute(EV) / self.Ports_Utilization.compute(EV) if self.Core_Bound.compute(EV)< self.Ports_Utilization.compute(EV) else 1) if SMT_2T_Utilization(self, EV, level)> 0.5 else 0
    self.thresh = (val > 0.5)
    return val
# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)
# Uops Per Instruction
def UopPI(self, EV, level):
val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
self.thresh = (val > 1.05)
return val
# Uops per taken branch
def UpTB(self, EV, level):
val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
self.thresh = val < Pipeline_Width * 1.5
return val
# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
return 1 / IPC(self, EV, level)
# Per-Logical Processor actual clocks when the Logical Processor is active.
def CLKS(self, EV, level):
return EV("CPU_CLK_UNHALTED.THREAD", level)
# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
return EV("TOPDOWN.SLOTS", level) if topdown_use_fixed else EV("TOPDOWN.SLOTS", level)
# NOTE(review): generated TMA metric helpers. Each takes `self` (the metric
# object — used only to publish `self.thresh`), `EV` (presumably an
# event-value resolver callable taking (event_name, level) — confirm against
# the toplev framework), and the tree `level`. Globals such as `smt_enabled`
# and sibling helpers (SLOTS, CLKS, CORE_CLKS, FLOP_Count, ...) are defined
# elsewhere in this generated model file.
# Fraction of Physical Core issue-slots utilized by this Logical Processor
def Slots_Utilization(self, EV, level):
    return SLOTS(self, EV, level) / (EV("TOPDOWN.SLOTS:percore", level) / 2) if smt_enabled else 1
# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_ISSUED.ANY", level)
# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)
# Floating Point Operations Per Cycle
def FLOPc(self, EV, level):
    return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level)
# Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to Fused-Multiply Add use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common.
def FP_Arith_Utilization(self, EV, level):
    return (EV("FP_ARITH_DISPATCHED.PORT_0", level) + EV("FP_ARITH_DISPATCHED.PORT_1", level) + EV("FP_ARITH_DISPATCHED.PORT_5", level)) / (2 * CORE_CLKS(self, EV, level))
# Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)
def ILP(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_EXECUTED.THREAD:c1", level)
# uops Executed per Cycle
def EPC(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / CLKS(self, EV, level)
# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
    return EV("CPU_CLK_UNHALTED.DISTRIBUTED", level) if smt_enabled else CLKS(self, EV, level)
# Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_LOADS", level)
    # `thresh` marks the metric as worth highlighting (loads are frequent).
    self.thresh = (val < 3)
    return val
# Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("MEM_INST_RETIRED.ALL_STORES", level)
    self.thresh = (val < 8)
    return val
# Instructions per Branch (lower number means higher occurrence rate)
def IpBranch(self, EV, level):
    insts = EV("INST_RETIRED.ANY", level)
    branches = EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    ratio = insts / branches
    # Highlight branch-heavy code (fewer than 8 instructions per branch).
    self.thresh = ratio < 8
    return ratio
# NOTE(review): more generated instruction-mix ratios; each `thresh`
# comparison is the TMA-defined highlight threshold for that metric.
# Instructions per (near) call (lower number means higher occurrence rate)
def IpCall(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
    self.thresh = (val < 200)
    return val
# Instructions per taken branch
def IpTB(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    # Pipeline_Width is a module-level constant of this generated model.
    self.thresh = val < Pipeline_Width * 2 + 1
    return val
# Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes.
def BpTkBranch(self, EV, level):
    return EV("BR_INST_RETIRED.ALL_BRANCHES", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
# Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408
def IpFLOP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / FLOP_Count(self, EV, level)
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.
def IpArith(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level))
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_SP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level)
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_DP(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level)
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX128(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val
# Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX256(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level))
    self.thresh = (val < 10)
    return val
# Instructions per PAUSE (lower number means higher occurrence rate)
def IpPause(self, EV, level):
    return Instructions(self, EV, level) / EV("CPU_CLK_UNHALTED.PAUSE_INST", level)
# Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)
def IpSWPF(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("SW_PREFETCH_ACCESS.T0:u0xF", level)
    self.thresh = (val < 100)
    return val
# Total number of retired Instructions
def Instructions(self, EV, level):
    # Straight pass-through of the retired-instruction count for this level.
    inst_count = EV("INST_RETIRED.ANY", level)
    return inst_count
# NOTE(review): frontend-delivery and pipeline-cost helpers. Several call
# sibling helpers defined elsewhere in this generated file (Retired_Slots,
# Execute_Cycles, Fetched_Uops, HighIPC), and the Bottleneck-cost metrics
# (DSB_Misses, DSB_Bandwidth, IC_Misses) combine the cached values of the
# TMA tree nodes attached to `self` (e.g. self.Fetch_Latency).
# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
    return Retired_Slots(self, EV, level) / EV("UOPS_RETIRED.SLOTS:c1", level)
# Estimated fraction of retirement-cycles dealing with repeat instructions
def Strings_Cycles(self, EV, level):
    val = EV("INST_RETIRED.REP_ITERATION", level) / EV("UOPS_RETIRED.SLOTS:c1", level)
    self.thresh = (val > 0.1)
    return val
# Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)
def IpAssist(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("ASSISTS.ANY", level)
    self.thresh = (val < 100000)
    return val
# NOTE(review): the generator emitted no descriptive comment here — uops
# executed per cycle in which execution occurred (see Execute_Cycles helper).
def Execute(self, EV, level):
    return EV("UOPS_EXECUTED.THREAD", level) / Execute_Cycles(self, EV, level)
# Average number of uops fetched from LSD per cycle
def Fetch_LSD(self, EV, level):
    return EV("LSD.UOPS", level) / EV("LSD.CYCLES_ACTIVE", level)
# Average number of uops fetched from DSB per cycle
def Fetch_DSB(self, EV, level):
    return EV("IDQ.DSB_UOPS", level) / EV("IDQ.DSB_CYCLES_ANY", level)
# Average number of uops fetched from MITE per cycle
def Fetch_MITE(self, EV, level):
    return EV("IDQ.MITE_UOPS", level) / EV("IDQ.MITE_CYCLES_ANY", level)
# Average number of Uops issued by front-end when it issued something
def Fetch_UpC(self, EV, level):
    return EV("UOPS_ISSUED.ANY", level) / EV("UOPS_ISSUED.ANY:c1", level)
# Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)
def LSD_Coverage(self, EV, level):
    return EV("LSD.UOPS", level) / Fetched_Uops(self, EV, level)
# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    val = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val
# Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.
def Unknown_Branch_Cost(self, EV, level):
    return EV("INT_MISC.UNKNOWN_BRANCH_CYCLES", level) / EV("INT_MISC.UNKNOWN_BRANCH_CYCLES:c1:e1", level)
# Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.
def DSB_Switch_Cost(self, EV, level):
    return EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", level) / EV("DSB2MITE_SWITCHES.PENALTY_CYCLES:c1:e1", level)
# Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Misses(self, EV, level):
    val = 100 *(self.Fetch_Latency.compute(EV) * self.DSB_Switches.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)) + self.Fetch_Bandwidth.compute(EV) * self.MITE.compute(EV) / (self.LSD.compute(EV) + self.MITE.compute(EV) + self.DSB.compute(EV)))
    self.thresh = (val > 10)
    return val
# Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck.
def DSB_Bandwidth(self, EV, level):
    val = 100 *(self.Frontend_Bound.compute(EV) * (self.Fetch_Bandwidth.compute(EV) / (self.Fetch_Bandwidth.compute(EV) + self.Fetch_Latency.compute(EV))) * (self.DSB.compute(EV) / (self.LSD.compute(EV) + self.MITE.compute(EV) + self.DSB.compute(EV))))
    self.thresh = (val > 10)
    return val
# Average Latency for L1 instruction cache misses
def ICache_Miss_Latency(self, EV, level):
    return EV("ICACHE_DATA.STALLS", level) / EV("ICACHE_DATA.STALLS:c1:e1", level)
# Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck.
def IC_Misses(self, EV, level):
    val = 100 *(self.Fetch_Latency.compute(EV) * self.ICache_Misses.compute(EV) / (self.LCP.compute(EV) + self.ICache_Misses.compute(EV) + self.DSB_Switches.compute(EV) + self.Branch_Resteers.compute(EV) + self.MS_Switches.compute(EV) + self.ITLB_Misses.compute(EV)))
    self.thresh = (val > 5)
    return val
# Instructions per non-speculative DSB miss (lower number means higher occurrence rate)
def IpDSB_Miss_Ret(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("FRONTEND_RETIRED.ANY_DSB_MISS", level)
    self.thresh = (val < 50)
    return val
# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
    return Instructions(self, EV, level) / EV("BACLEARS.ANY", level)
# L2 cache true code cacheline misses per kilo instruction
def L2MPKI_Code(self, EV, level):
    return 1000 * EV("FRONTEND_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache speculative code cacheline misses per kilo instruction
def L2MPKI_Code_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.CODE_RD_MISS", level) / EV("INST_RETIRED.ANY", level)
# Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    insts = EV("INST_RETIRED.ANY", level)
    ipm = insts / EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    # Highlight when mispredictions occur more often than once per 200 insts.
    self.thresh = ipm < 200
    return ipm
# NOTE(review): branch-mix and cache MPKI helpers; Mispredictions and
# Br_DoI_Jumps are sibling helpers defined elsewhere in this generated file.
# Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).
def IpMisp_Cond_Ntaken(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_NTAKEN", level)
    self.thresh = (val < 200)
    return val
# Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).
def IpMisp_Cond_Taken(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.COND_TAKEN", level)
    self.thresh = (val < 200)
    return val
# Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).
def IpMisp_Ret(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.RET", level)
    self.thresh = (val < 500)
    return val
# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    val = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.INDIRECT", level)
    self.thresh = (val < 1000)
    return val
# Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)
def Branch_Misprediction_Cost(self, EV, level):
    return Mispredictions(self, EV, level) * SLOTS(self, EV, level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / 100
# Speculative to Retired ratio of all clears (covering Mispredicts and nukes)
def Spec_Clears_Ratio(self, EV, level):
    return EV("INT_MISC.CLEARS_COUNT", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))
# Fraction of branches that are non-taken conditionals
def Cond_NT(self, EV, level):
    return EV("BR_INST_RETIRED.COND_NTAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches that are taken conditionals
def Cond_TK(self, EV, level):
    return EV("BR_INST_RETIRED.COND_TAKEN", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches that are CALL or RET
def CallRet(self, EV, level):
    return (EV("BR_INST_RETIRED.NEAR_CALL", level) + EV("BR_INST_RETIRED.NEAR_RETURN", level)) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches that are unconditional (direct or indirect) jumps
def Jump(self, EV, level):
    return Br_DoI_Jumps(self, EV, level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
# Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)
def Other_Branches(self, EV, level):
    # Complement of the four named branch-class fractions above.
    return 1 -(Cond_NT(self, EV, level) + Cond_TK(self, EV, level) + CallRet(self, EV, level) + Jump(self, EV, level))
# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("MEM_LOAD_COMPLETED.L1_MISS_ANY", level)
# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    return EV("L1D_PEND_MISS.PENDING", level) / EV("L1D_PEND_MISS.PENDING_CYCLES", level)
# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L1_MISS", level) / EV("INST_RETIRED.ANY", level)
# L1 cache true misses per kilo instruction for all demand loads (including speculative)
def L1MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.ALL_DEMAND_DATA_RD", level) / EV("INST_RETIRED.ANY", level)
# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L2_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache true misses per kilo instruction for all request types (including speculative)
def L2MPKI_All(self, EV, level):
    return 1000 * EV("L2_RQSTS.MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache true misses per kilo instruction for all demand loads (including speculative)
def L2MPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level) / EV("INST_RETIRED.ANY", level)
# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    return 1000 * EV("L2_RQSTS.RFO_MISS", level) / EV("INST_RETIRED.ANY", level)
# L2 cache hits per kilo instruction for all request types (including speculative)
def L2HPKI_All(self, EV, level):
    # Hits = total references minus misses; normalized per 1000 instructions.
    l2_hits = EV("L2_RQSTS.REFERENCES", level) - EV("L2_RQSTS.MISS", level)
    return 1000 * l2_hits / EV("INST_RETIRED.ANY", level)
# NOTE(review): memory-bandwidth, TLB, and system-level helpers. OneBillion,
# OneMillion, Energy_Unit, smt_enabled, and Num_CPUs/FLOP_Count/Time siblings
# come from elsewhere in this generated model file.
# L2 cache hits per kilo instruction for all demand loads (including speculative)
def L2HPKI_Load(self, EV, level):
    return 1000 * EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level) / EV("INST_RETIRED.ANY", level)
# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.L3_MISS", level) / EV("INST_RETIRED.ANY", level)
# Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)
def FB_HPKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_RETIRED.FB_HIT", level) / EV("INST_RETIRED.ANY", level)
# Fill bandwidths in GB/s: 64 bytes per event (presumably the cache-line
# size — confirm against the model), scaled by the run duration.
def L1D_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L1D.REPLACEMENT", level) / OneBillion / Time(self, EV, level)
def L2_Cache_Fill_BW(self, EV, level):
    return 64 * EV("L2_LINES_IN.ALL", level) / OneBillion / Time(self, EV, level)
def L3_Cache_Fill_BW(self, EV, level):
    return 64 * EV("LONGEST_LAT_CACHE.MISS", level) / OneBillion / Time(self, EV, level)
def L3_Cache_Access_BW(self, EV, level):
    return 64 * EV("OFFCORE_REQUESTS.ALL_REQUESTS", level) / OneBillion / Time(self, EV, level)
# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    val = (EV("ITLB_MISSES.WALK_PENDING", level) + EV("DTLB_LOAD_MISSES.WALK_PENDING", level) + EV("DTLB_STORE_MISSES.WALK_PENDING", level)) / (4 * CORE_CLKS(self, EV, level))
    self.thresh = (val > 0.5)
    return val
# STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Code_STLB_MPKI(self, EV, level):
    return 1000 * EV("ITLB_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)
# STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Load_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)
# STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)
def Store_STLB_MPKI(self, EV, level):
    return 1000 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) / EV("INST_RETIRED.ANY", level)
# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    return L1D_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    return L2_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    return L3_Cache_Fill_BW(self, EV, level)
# Average per-core data access bandwidth to the L3 cache [GB / sec]
def L3_Cache_Access_BW_2T(self, EV, level):
    return L3_Cache_Access_BW(self, EV, level)
# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)
# Average Latency for L3 cache miss demand Loads
def Load_L3_Miss_Latency(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD", level)
# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD:c1", level)
# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    return EV("OFFCORE_REQUESTS_OUTSTANDING.DATA_RD", level) / EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)
# Un-cacheable retired load per kilo instruction
def UC_Load_PKI(self, EV, level):
    return 1000 * EV("MEM_LOAD_MISC_RETIRED.UC", level) / EV("INST_RETIRED.ANY", level)
# "Bus lock" per kilo instruction
def Bus_Lock_PKI(self, EV, level):
    return 1000 * EV("SQ_MISC.BUS_LOCK", level) / EV("INST_RETIRED.ANY", level)
# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)
# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    return EV("CPU_CLK_UNHALTED.REF_TSC", level) / EV("msr/tsc/", 0)
# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    return Turbo_Utilization(self, EV, level) * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)
# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    return (FLOP_Count(self, EV, level) / OneBillion) / Time(self, EV, level)
# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    return CLKS(self, EV, level) / EV("CPU_CLK_UNHALTED.REF_TSC", level)
# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_DISTRIBUTED", level) if smt_enabled else 0
# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = (val > 0.05)
    return val
# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    return EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level) / EV("INST_RETIRED.ANY_P:SUP", level)
# Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states. Sample code of TPAUSE: https://github.com/torvalds/linux/blob/master/arch/x86/lib/delay.c#L105. If running on Linux, please check the power control interface: https://github.com/torvalds/linux/blob/master/arch/x86/kernel/cpu/umwait.c and https://github.com/torvalds/linux/blob/master/Documentation/ABI/testing/sysfs-devices-system-cpu#L587
def C0_Wait(self, EV, level):
    val = EV("CPU_CLK_UNHALTED.C0_WAIT", level) / CLKS(self, EV, level)
    self.thresh = (val > 0.05)
    return val
# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    return 64 *(EV("UNC_HAC_ARB_TRK_REQUESTS.ALL", level) + EV("UNC_HAC_ARB_COH_TRK_REQUESTS.ALL", level)) / OneBillion / Time(self, EV, level)
# Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches
def MEM_Parallel_Reads(self, EV, level):
    return EV("UNC_ARB_DAT_OCCUPANCY.RD", level) / EV("UNC_ARB_DAT_OCCUPANCY.RD:c1", level)
# Total package Power in Watts
def Power(self, EV, level):
    return EV("UNC_PKG_ENERGY_STATUS", level) * Energy_Unit /(Time(self, EV, level) * OneMillion )
# Run duration time in seconds
def Time(self, EV, level):
    elapsed = EV("interval-s", 0)
    # Flag very short runs (< 1s), where ratios are likely noisy.
    self.thresh = elapsed < 1
    return elapsed
# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    uncore_clocks = EV("UNC_CLOCK.SOCKET", level)
    return uncore_clocks
# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    insts = EV("INST_RETIRED.ANY", level)
    ratio = insts / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = ratio < 1000000
    return ratio
# Event groups
# NOTE(review): generated TMA tree nodes. The class attributes (name, domain,
# level, metricgroup, ...) are consumed by the toplev framework; compute()
# caches the node value in self.val, sets self.thresh (gated on the parent
# node's thresh for sub-nodes), and routes divide-by-zero into handle_error()
# — both handle_error and the helper globals (topdown_use_fixed,
# PERF_METRICS_SUM, Pipeline_Width) live elsewhere in this generated file.
class Frontend_Bound:
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_4:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    def compute(self, EV):
        try:
            # Fixed-counter PERF_METRICS path when available, else IDQ_BUBBLES.
            self.val = (EV("PERF_METRICS.FRONTEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) - EV("INT_MISC.UOP_DROPPING", 1) / SLOTS(self, EV, 1) if topdown_use_fixed else(EV("IDQ_BUBBLES.CORE", 1) - EV("INT_MISC.UOP_DROPPING", 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the
processor's Frontend undersupplies its Backend. Frontend
denotes the first part of the processor core responsible to
fetch operations that are executed later on by the Backend
part. Within the Frontend; a branch predictor predicts the
next address to fetch; cache-lines are fetched from the
memory subsystem; parsed into instructions; and lastly
decoded into micro-operations (uops). Ideally the Frontend
can issue Pipeline_Width uops every cycle to the Backend.
Frontend Bound denotes unutilized issue-slots when there is
no Backend stall; i.e. bubbles where Frontend delivered no
uops while Backend could have accepted them. For example;
stalls due to instruction-cache misses would be categorized
under Frontend Bound."""
class Fetch_Latency:
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_16:pp', 'FRONTEND_RETIRED.LATENCY_GE_8:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ((EV("PERF_METRICS.FETCH_LATENCY", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) - EV("INT_MISC.UOP_DROPPING", 2) / SLOTS(self, EV, 2)) if topdown_use_fixed else(EV("IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE", 2) * Pipeline_Width - EV("INT_MISC.UOP_DROPPING", 2)) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend latency issues. For example; instruction-
cache misses; iTLB misses or fetch stalls after a branch
misprediction are categorized under Frontend Latency. In
such cases; the Frontend eventually delivers no uops for
some period."""
class ICache_Misses:
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.L2_MISS:pp', 'FRONTEND_RETIRED.L1I_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ICACHE_DATA.STALLS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to instruction cache misses.. Using compiler's
Profile-Guided Optimization (PGO) can reduce i-cache misses
through improved hot code layout."""
class ITLB_Misses:
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.STLB_MISS:pp', 'FRONTEND_RETIRED.ITLB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ICACHE_TAG.STALLS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Instruction TLB (ITLB) misses.. Consider
large 2M pages for code (selectively prefer hot large-size
function, due to limited 2M entries). Linux options:
standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public
ations/optimizing-function-placement-for-large-scale-data-
center-applications-2/"""
# NOTE(review): resteer-related TMA nodes. Branch_Resteers aggregates its
# Unknown_Branches child; Mispredicts/Clears split the same clear-resteer
# cycles by Mispred_Clears_Fraction (defined elsewhere in this file).
class Branch_Resteers:
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("INT_MISC.CLEAR_RESTEER_CYCLES", 3) / CLKS(self, EV, 3) + self.Unknown_Branches.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers. Branch Resteers estimates
the Frontend delay in fetching operations from corrected
path; following all sorts of miss-predicted branches. For
example; branchy code with lots of miss-predictions might
get categorized under Branch Resteers. Note the value of
this node may overlap with its siblings."""
class Mispredicts_Resteers:
    name = "Mispredicts_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 4) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Mispredicts_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Branch
Misprediction at execution stage."""
class Clears_Resteers:
    name = "Clears_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['INT_MISC.CLEAR_RESTEER_CYCLES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'MachineClears'])
    maxval = None
    def compute(self, EV):
        try:
            # Complement of the mispredict fraction applied to the same cycles.
            self.val = (1 - Mispred_Clears_Fraction(self, EV, 4)) * EV("INT_MISC.CLEAR_RESTEER_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Clears_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Machine
Clears."""
class Unknown_Branches:
    name = "Unknown_Branches"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['FRONTEND_RETIRED.UNKNOWN_BRANCH']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("INT_MISC.UNKNOWN_BRANCH_CYCLES", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Unknown_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to new branch address clears. These are fetched
branches the Branch Prediction Unit was unable to recognize
(e.g. first time the branch is fetched or hitting BTB
capacity limit) hence called Unknown Branches"""
class MS_Switches:
    name = "MS_Switches"
    # "Clocks_Estimated": value is a cost model, not a direct cycle count.
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.MS_FLOWS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # MS_Switches_Cost and Retire_Fraction are defined elsewhere in
            # this generated file.
            self.val = MS_Switches_Cost * EV("UOPS_RETIRED.MS:c1:e1", 3) / Retire_Fraction(self, EV, 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU
was stalled due to switches of uop delivery to the Microcode
Sequencer (MS). Commonly used instructions are optimized for
delivery by the DSB (decoded i-cache) or MITE (legacy
instruction decode) pipelines. Certain operations cannot be
handled natively by the execution pipeline; and must be
performed by microcode (small programs injected into the
execution stream). Switching to the MS too often can
negatively impact performance. The MS is designated to
deliver long uop flows required by CISC instructions like
CPUID; or uncommon conditions like Floating Point Assists
when dealing with Denormals."""
class LCP:
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    sample = []
    htoff = False
    errcount = 0
    sibling = None
    maxval = None
    metricgroup = frozenset(['FetchLat'])
    def compute(self, EV):
        # Cycles stalled on Length-Changing-Prefix decoding as a fraction of
        # thread clocks; only highlighted when the parent node is highlighted.
        try:
            lcp_stalls = EV("DECODE.LCP", 3)
            self.val = lcp_stalls / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled
due to Length Changing Prefixes (LCPs). Using proper
compiler flags or Intel Compiler by default will certainly
avoid this."""
class DSB_Switches:
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.DSB_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            # Penalty cycles of DSB->MITE switches over thread clocks.
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to switches from DSB to MITE pipelines. The DSB
(decoded i-cache) is a Uop Cache where the front-end
directly delivers Uops (micro operations) avoiding heavy x86
decoding. The DSB pipeline has shorter latency and delivered
higher bandwidth than the MITE (legacy instruction decode
pipeline). Switching between the two pipelines can cause
penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class Fetch_Bandwidth:
    # TMA tree node (auto-generated): level-2 frontend-bandwidth slots, derived as
    # the remainder of Frontend_Bound after subtracting Fetch_Latency.
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_1:pp', 'FRONTEND_RETIRED.LATENCY_GE_2:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # Clamped at 0 so rounding in the sibling nodes cannot go negative.
            self.val = max(0 , self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV))
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend bandwidth issues. For example;
inefficiencies at the instruction decoders; or restrictions
for caching in the DSB (decoded uops cache) are categorized
under Fetch Bandwidth. In such cases; the Frontend typically
delivers suboptimal amount of uops to the Backend."""
class MITE:
    # TMA tree node (auto-generated): estimated core fraction of cycles limited by
    # the legacy MITE decode pipeline.
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['FRONTEND_RETIRED.ANY_DSB_MISS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            # Divided by 2 — presumably to split across the two SMT threads
            # (CORE_CLKS is per core); confirm against the TMA spreadsheet.
            self.val = (EV("IDQ.MITE_CYCLES_ANY", 3) - EV("IDQ.MITE_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to the MITE pipeline (the legacy
decode pipeline). This pipeline is used for code that was
not pre-cached in the DSB or LSD. For example;
inefficiencies due to asymmetric decoders; use of long
immediate or LCP can manifest as MITE fetch bandwidth
bottleneck.. Consider tuning codegen of 'small hotspots'
that can fit in DSB. Read about 'Decoded ICache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class Decoder0_Alone:
    # TMA tree node (auto-generated): cycles where only decoder-0 was active,
    # computed from the :c1 / :c2 (cmask) variants of INST_DECODED.DECODERS.
    name = "Decoder0_Alone"
    domain = "Slots_Estimated"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("INST_DECODED.DECODERS:c1", 4) - EV("INST_DECODED.DECODERS:c2", 4)) / CORE_CLKS(self, EV, 4) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Decoder0_Alone zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where decoder-0
was the only active decoder"""
class DSB:
    # TMA tree node (auto-generated): core fraction of cycles limited by the
    # DSB (decoded uop cache) fetch pipeline.
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("IDQ.DSB_CYCLES_ANY", 3) - EV("IDQ.DSB_CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to DSB (decoded uop cache) fetch
pipeline. For example; inefficient utilization of the DSB
cache structure or bank conflict when reading from it; are
categorized here."""
class LSD:
    # TMA tree node (auto-generated): core fraction of cycles limited by the
    # Loop Stream Detector unit.
    name = "LSD"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'LSD'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("LSD.CYCLES_ACTIVE", 3) - EV("LSD.CYCLES_OK", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LSD zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to LSD (Loop Stream Detector) unit.
LSD typically does well sustaining Uop supply. However; in
some rare cases; optimal uop-delivery could not be reached
for small loops whose size (in terms of number of uops) does
not suit well the LSD structure."""
class Bad_Speculation:
    # TMA level-1 category (auto-generated): slots wasted on incorrect speculation,
    # derived as the remainder of 1 minus the other three level-1 categories.
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            # max(..., 0) guards against the three siblings summing above 1.
            self.val = max(1 -(self.Frontend_Bound.compute(EV) + self.Backend_Bound.compute(EV) + self.Retiring.compute(EV)) , 0 )
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""
class Branch_Mispredicts:
    # TMA tree node (auto-generated): slots wasted on branch misprediction.
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['TOPDOWN.BR_MISPREDICT_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # Two formulas: fixed-counter PERF_METRICS path (normalized by
            # PERF_METRICS_SUM) when topdown_use_fixed is set, else the
            # generic-counter TOPDOWN.BR_MISPREDICT_SLOTS path.
            self.val = (EV("PERF_METRICS.BRANCH_MISPREDICTS", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("TOPDOWN.BR_MISPREDICT_SLOTS", 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction. These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Other_Mispredicts:
    # TMA tree node (auto-generated): slots lost to mispredictions other than
    # retired x86 branches (derived from the Branch_Mispredicts sibling).
    name = "Other_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'BrMispredicts'])
    maxval = None
    def compute(self, EV):
        try:
            # Floored at 0.0001 (not 0) — keeps the node visibly non-zero;
            # matches the generated TMA formula.
            self.val = max(self.Branch_Mispredicts.compute(EV) * (1 - EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) / (EV("INT_MISC.CLEARS_COUNT", 3) - EV("MACHINE_CLEARS.COUNT", 3))) , 0.0001 )
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Mispredicts zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU was stalled
due to other cases of misprediction (non-retired x86
branches or other types)."""
class Machine_Clears:
    # TMA tree node (auto-generated): slots wasted on machine clears, derived as
    # the remainder of Bad_Speculation after Branch_Mispredicts.
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears. These slots are either wasted by
uops fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:. https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""
class Other_Nukes:
    # TMA tree node (auto-generated): machine-clear slots not caused by memory
    # ordering, derived from the Machine_Clears sibling.
    name = "Other_Nukes"
    domain = "Slots"
    area = "BAD"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'Machine_Clears'])
    maxval = None
    def compute(self, EV):
        try:
            # Floored at 0.0001, mirroring Other_Mispredicts.
            self.val = max(self.Machine_Clears.compute(EV) * (1 - EV("MACHINE_CLEARS.MEMORY_ORDERING", 3) / EV("MACHINE_CLEARS.COUNT", 3)) , 0.0001 )
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Nukes zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Nukes (Machine Clears) not related to memory
ordering."""
class Backend_Bound:
    # TMA level-1 category (auto-generated): slots where no uops were delivered
    # for lack of backend resources.
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = ['TOPDOWN.BACKEND_BOUND_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            # Fixed-counter PERF_METRICS path vs. generic TOPDOWN event path,
            # selected by the module-level topdown_use_fixed flag.
            self.val = (EV("PERF_METRICS.BACKEND_BOUND", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) if topdown_use_fixed else EV("TOPDOWN.BACKEND_BOUND_SLOTS", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. Backend is the portion of
the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""
class Memory_Bound:
    # TMA tree node (auto-generated): slots where the memory subsystem was the
    # backend bottleneck.
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # Same fixed-vs-generic counter selection as Backend_Bound.
            self.val = (EV("PERF_METRICS.MEMORY_BOUND", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("TOPDOWN.MEMORY_BOUND_SLOTS", 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck. Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""
class L1_Bound:
    # TMA tree node (auto-generated): stalls without loads missing the L1 data
    # cache (load-bound cycles minus L1D-miss stall cycles, clamped at 0).
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT:pp', 'MEM_LOAD_RETIRED.FB_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max((EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3) - EV("MEMORY_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3) , 0 )
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled without
loads missing the L1 data cache. The L1 data cache
typically has the shortest latency. However; in certain
cases like loads blocked on older stores; a load might
suffer due to high latency even though it is being satisfied
by the L1. Another example is loads who miss in the TLB.
These cases are characterized by execution unit stalls;
while some non-completed demand load lives in the machine
without having that demand load missing the L1 cache."""
class DTLB_Load:
    # TMA tree node (auto-generated): estimated cycles lost to DTLB misses on
    # loads (STLB hits costed per event, plus the Load_STLB_Miss child).
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # NOTE(review): the EV(..., 999) re-read of the same event looks like a
            # per-event latency lookup at pseudo-level 999 — confirm EV semantics.
            self.val = EV("MEM_INST_RETIRED.STLB_HIT_LOADS", 4) * min(EV("MEM_INST_RETIRED.STLB_HIT_LOADS", 999) , Mem_STLB_Hit_Cost) / CLKS(self, EV, 4) + self.Load_STLB_Miss.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages). This includes hitting in the second-level TLB (STLB)
as well as performing a hardware page walk on an STLB miss.."""
class Load_STLB_Hit:
    # TMA tree node (auto-generated): DTLB load misses that hit the STLB —
    # parent DTLB_Load minus the Load_STLB_Miss sibling, clamped at 0.
    name = "Load_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = max(0 , self.DTLB_Load.compute(EV) - self.Load_STLB_Miss.compute(EV))
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Hit zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the (first level) DTLB was missed by load accesses, that
later on hit in second-level TLB (STLB)"""
class Load_STLB_Miss:
    # TMA tree node (auto-generated): cycles a hardware page walk was active
    # for load STLB misses.
    name = "Load_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("DTLB_LOAD_MISSES.WALK_ACTIVE", 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Load_STLB_Miss zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles where the
Second-level TLB (STLB) was missed by load accesses,
performing a hardware page walk"""
class Store_Fwd_Blk:
    # TMA tree node (auto-generated): loads blocked on store forwarding,
    # costed at a fixed 13 cycles per blocked load (model constant).
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles when the
memory subsystem had loads blocked since they could not
forward data from earlier (in program order) overlapping
stores. To streamline memory operations in the pipeline; a
load can avoid waiting for memory if a prior in-flight store
is writing the data that the load wants to read (store
forwarding process). However; in some cases the load may be
blocked for a significant time pending the store forward.
For example; when the prior store is writing a smaller
region than the load is reading."""
class L1_Hit_Latency:
    # TMA tree node (auto-generated): estimated cycles exposed by L1-hit load
    # latency; capped by the measured L1-only memory-stall cycles.
    name = "L1_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L1_HIT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Intricate generated formula: (L1-hit loads x dependent-load weight)
            # bounded by (mem-any stalls minus L1D-miss stalls, floored at 0).
            self.val = min(2 *(EV("MEM_INST_RETIRED.ALL_LOADS", 4) - EV("MEM_LOAD_RETIRED.FB_HIT", 4) - EV("MEM_LOAD_RETIRED.L1_MISS", 4)) * Dependent_Loads_Weight(self, EV, 4) / 100 , max(EV("CYCLE_ACTIVITY.CYCLES_MEM_ANY", 4) - EV("MEMORY_ACTIVITY.CYCLES_L1D_MISS", 4) , 0)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Hit_Latency zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles with demand
load accesses that hit the L1 cache. The short latency of
the L1 data cache may be exposed in pointer-chasing memory
access patterns as an example."""
class Lock_Latency:
    # TMA tree node (auto-generated): cycles spent handling lock operations;
    # lock-load count weighted by its EV(..., 999) per-event factor.
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.LOCK_LOADS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("MEM_INST_RETIRED.LOCK_LOADS", 4) * EV("MEM_INST_RETIRED.LOCK_LOADS", 999) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU spent
handling cache misses due to lock operations. Due to the
microarchitecture handling of locks; they are classified as
L1_Bound regardless of what memory source satisfied them."""
class Split_Loads:
    # TMA tree node (auto-generated): cycles handling cache-line-crossing loads.
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Per-split-load cost capped at the overall load-miss latency estimate.
            self.val = EV("MEM_INST_RETIRED.SPLIT_LOADS", 4) * min(EV("MEM_INST_RETIRED.SPLIT_LOADS", 999) , Load_Miss_Real_Latency(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles handling memory
load split accesses - load that cross 64-byte cache line
boundary. . Consider aligning data or hot structure fields.
See the Optimization Manual for more details"""
class FB_Full:
    # TMA tree node (auto-generated): cycles the L1D fill buffers were full.
    # Note: thresh does not gate on the parent (unlike most level-4 nodes).
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("L1D_PEND_MISS.FB_FULL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val
    desc = """
This metric does a *rough estimation* of how often L1D Fill
Buffer unavailability limited additional L1D miss memory
access requests to proceed. The higher the metric value; the
deeper the memory hierarchy level the misses are satisfied
from (metric values >1 are valid). Often it hints on
approaching bandwidth limits (to L2 cache; L3 cache or
external memory).. See $issueBW and $issueSL hints. Avoid
software prefetches if indeed memory BW limited."""
class L2_Bound:
    # TMA tree node (auto-generated): stalls attributed to L2 cache accesses
    # (L1D-miss stalls minus L2-miss stalls).
    name = "L2_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L2_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("MEMORY_ACTIVITY.STALLS_L1D_MISS", 3) - EV("MEMORY_ACTIVITY.STALLS_L2_MISS", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L2_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
L2 cache accesses by loads. Avoiding cache misses (i.e. L1
misses/L2 hits) can improve the latency and increase
performance."""
class L3_Bound:
    # TMA tree node (auto-generated): stalls attributed to L3 cache accesses
    # (L2-miss stalls minus L3-miss stalls).
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("MEMORY_ACTIVITY.STALLS_L2_MISS", 3) - EV("MEMORY_ACTIVITY.STALLS_L3_MISS", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
loads accesses to L3 cache or contended with a sibling Core.
Avoiding cache misses (i.e. L2 misses/L3 hits) can improve
the latency and increase performance."""
class Contested_Accesses:
    # TMA tree node (auto-generated): cycles handling cross-core contested
    # accesses (XSNP miss + forwarded-HitM snoops, scaled by FB_Factor).
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD', 'MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Generated cost model: each snoop type weighted by its incremental
            # cost over an L2 hit; XSNP_FWD split by True_XSNP_HitM_Fraction.
            self.val = (EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", 4) * min(EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS", 999) , Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) + EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", 4) * min(EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", 999) , Mem_XSNP_HitM_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * True_XSNP_HitM_Fraction(self, EV, 4)) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to contested
accesses. Contested accesses occur when data written by one
Logical Processor are read by another Logical Processor on a
different Physical Core. Examples of contested accesses
include synchronizations such as locks; true data sharing
such as modified locked variables; and false sharing."""
class Data_Sharing:
    # TMA tree node (auto-generated): cycles handling clean data-sharing snoops;
    # complements Contested_Accesses via (1 - True_XSNP_HitM_Fraction).
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD", 4) * min(EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD", 999) , Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) + EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", 4) * min(EV("MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD", 999) , Mem_XSNP_Hit_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * (1 - True_XSNP_HitM_Fraction(self, EV, 4))) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to data-sharing
accesses. Data shared by multiple Logical Processors (even
just read shared) may cause increased access latency due to
cache coherency. Excessive data sharing can drastically harm
multithreaded performance."""
class L3_Hit_Latency:
    # TMA tree node (auto-generated): cycles exposed by unloaded L3-hit latency.
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # L3-hit count x (L3 cost above L2-hit cost), scaled by FB_Factor.
            self.val = EV("MEM_LOAD_RETIRED.L3_HIT", 4) * min(EV("MEM_LOAD_RETIRED.L3_HIT", 999) , Mem_XSNP_None_Cost(self, EV, 4) - Mem_L2_Hit_Cost(self, EV, 4)) * FB_Factor(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles with demand load
accesses that hit the L3 cache under unloaded scenarios
(possibly L3 latency limited). Avoiding private cache
misses (i.e. L2 misses/L3 hits) will improve the latency;
reduce contention with sibling physical cores and increase
performance. Note the value of this node may overlap with
its siblings."""
class SQ_Full:
    # TMA tree node (auto-generated): cycles the Super Queue / L2 request path
    # was saturated (XQ full plus L2-induced L1D-miss stalls).
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("XQ.FULL_CYCLES", 4) + EV("L1D_PEND_MISS.L2_STALLS", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """
This metric measures fraction of cycles where the Super
Queue (SQ) was full taking into account all request-types
and both hardware SMT threads (Logical Processors)."""
class DRAM_Bound:
    # TMA tree node (auto-generated): stalls on external memory (DRAM);
    # the formula is delegated to the MEM_Bound_Ratio helper.
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_RETIRED.L3_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = MEM_Bound_Ratio(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled on
accesses to external memory (DRAM) by loads. Better caching
can improve the latency and increase performance."""
class MEM_Bandwidth:
    # TMA tree node (auto-generated): cycles approaching DRAM bandwidth limits
    # (outstanding demand-read bandwidth cycles via the ORO_DRD_BW_Cycles helper).
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the core's
performance was likely hurt due to approaching bandwidth
limits of external memory - DRAM ([SPR-HBM] and/or HBM).
The underlying heuristic assumes that a similar off-core
traffic is generated by all IA cores. This metric does not
aggregate non-data-read requests by this logical processor;
requests from other IA Logical Processors/Physical
Cores/sockets; or other non-IA devices like GPU; hence the
maximum external memory bandwidth limits may or may not be
approached when this metric is flagged (see Uncore counters
for that).. Improve data accesses to reduce cacheline
transfers from/to memory. Examples: 1) Consume all bytes of
a each cacheline before it is evicted (e.g. reorder
structure elements and split non-hot ones), 2) merge
computed-limited with BW-limited loops, 3) NUMA
optimizations in multi-socket system. Note: software
prefetches will not help BW-limited application.."""
class MEM_Latency:
    # TMA tree node (auto-generated): DRAM latency-limited cycles — all
    # outstanding-demand-read cycles minus the MEM_Bandwidth sibling's share.
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the
performance was likely hurt due to latency from external
memory - DRAM ([SPR-HBM] and/or HBM). This metric does not
aggregate requests from other Logical Processors/Physical
Cores/sockets (see Uncore counters for that).. Improve data
accesses or interleave them with compute. Examples: 1) Data
layout re-structuring, 2) Software Prefetches (also through
the compiler).."""
class Store_Bound:
    # TMA tree node (auto-generated): stalls where execution was bound on stores.
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_INST_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.BOUND_ON_STORES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to RFO
store memory accesses; RFO store issue a read-for-ownership
request before the write. Even though store accesses do not
typically stall out-of-order CPUs; there are few cases where
stores can lead to actual stalls. This metric will be
flagged should RFO stores be a bottleneck."""
class Store_Latency:
    # TMA tree node (auto-generated): cycles handling L1D store misses —
    # L2-hit store cycles plus the non-locked share of outstanding RFO cycles.
    name = "Store_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU spent
handling L1D store misses. Store accesses usually less
impact out-of-order core performance; however; holding
resources for longer time can lead into undesired
implications (e.g. contention on L1D fill-buffer entries -
see FB_Full). Consider to avoid/reduce unnecessary (or
easily load-able/computable) memory store."""
class False_Sharing:
    # TMA tree node (auto-generated): cycles handling false-sharing HitM snoops
    # on RFO stores, costed via the Mem_XSNP_HitM_Cost helper.
    name = "False_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "False_Sharing zero division")
        return self.val
    desc = """
This metric roughly estimates how often CPU was handling
synchronizations due to False Sharing. False Sharing is a
multithreading hiccup; where multiple Logical Processors
contend on different data-elements mapped into the same
cache line. . False Sharing can be easily avoided by padding
to make Logical Processors access different lines."""
class Split_Stores:
    # TMA tree node (auto-generated): rate of cache-line-crossing stores;
    # per-event weight capped at 1.
    name = "Split_Stores"
    domain = "Core_Utilization"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.SPLIT_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("MEM_INST_RETIRED.SPLIT_STORES", 4) * min(EV("MEM_INST_RETIRED.SPLIT_STORES", 999) , 1) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Stores zero division")
        return self.val
    desc = """
This metric represents rate of split store accesses.
Consider aligning your data to the 64-byte cache line
granularity."""
class Streaming_Stores:
    # TMA tree node (auto-generated): cycles attributed to streaming (non-temporal)
    # stores; 9 cycles per response (model constant), only when DS is enabled.
    name = "Streaming_Stores"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['OCR.STREAMING_WR.ANY_RESPONSE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBW', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 9 * EV("OCR.STREAMING_WR.ANY_RESPONSE", 4) / CLKS(self, EV, 4) if DS else 0
            # NOTE(review): bare EV call below looks intentional — presumably it
            # keeps the event registered for collection even when DS is false
            # (the conditional above skips its EV call in that case). Confirm
            # against the model generator before removing.
            EV("OCR.STREAMING_WR.ANY_RESPONSE", 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Streaming_Stores zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to
Streaming store memory accesses; Streaming store optimize
out a read request required by RFO stores. Even though store
accesses do not typically stall out-of-order CPUs; there are
few cases where stores can lead to actual stalls. This
metric will be flagged should Streaming stores be a
bottleneck."""
class DTLB_Store:
    # TMA tree node (auto-generated): estimated cycles lost to DTLB misses on
    # stores (mirrors DTLB_Load: costed STLB hits plus the Store_STLB_Miss child).
    name = "DTLB_Store"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_INST_RETIRED.STLB_MISS_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("MEM_INST_RETIRED.STLB_HIT_STORES", 4) * min(EV("MEM_INST_RETIRED.STLB_HIT_STORES", 999) , Mem_STLB_Hit_Cost) / CLKS(self, EV, 4) + self.Store_STLB_Miss.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Store zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles spent
handling first-level data TLB store misses. As with
ordinary data caching; focus on improving data locality and
reducing working-set size to reduce DTLB overhead.
Additionally; consider using profile-guided optimization
(PGO) to collocate frequently-used data on the same page.
Try using larger page sizes for large amounts of frequently-
used data."""
# TMA tree node (level 5, BE/Mem): store DTLB misses that hit the second-
# level TLB, derived as parent (DTLB_Store) minus sibling (Store_STLB_Miss).
class Store_STLB_Hit:
    name = "Store_STLB_Hit"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Clamped at 0 so rounding/approximation can't go negative.
            self.val = max(0 , self.DTLB_Store.compute(EV) - self.Store_STLB_Miss.compute(EV))
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Hit zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the TLB was missed by store accesses, hitting in the second-
level TLB (STLB)"""
# TMA tree node (level 5, BE/Mem): cycles with an active hardware page walk
# for store-side STLB misses, normalized to core clocks.
class Store_STLB_Miss:
    name = "Store_STLB_Miss"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("DTLB_STORE_MISSES.WALK_ACTIVE", 5) / CORE_CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_STLB_Miss zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles where the STLB
was missed by store accesses, performing a hardware page
walk"""
# TMA tree node (level 2, BE/Core): non-memory backend bottlenecks, derived
# as Backend_Bound minus Memory_Bound (clamped at 0).
class Core_Bound:
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots where Core non-
memory issues were of a bottleneck. Shortage in hardware
compute resources; or dependencies in software's
instructions are both categorized under Core Bound. Hence it
may indicate the machine ran out of an out-of-order
resource; certain execution units are overloaded or
dependencies in program's data- or instruction-flow are
limiting the performance (e.g. FP-chained long-latency
arithmetic operations).. Tip: consider Port Saturation
analysis as next step."""
# TMA tree node (level 3, BE/Core): fraction of cycles the divide/sqrt unit
# was active.
class Divider:
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['ARITH.DIV_ACTIVE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("ARITH.DIV_ACTIVE", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the Divider
unit was active. Divide and square root instructions are
performed by the Divider unit and can take considerably
longer latency than integer or Floating Point addition;
subtraction; or multiplication."""
# TMA tree node (level 3, BE/Core): issue-pipeline stalls from serializing
# operations; includes the C02_WAIT child node's contribution.
class Serializing_Operation:
    name = "Serializing_Operation"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['RESOURCE_STALLS.SCOREBOARD']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("RESOURCE_STALLS.SCOREBOARD", 3) / CLKS(self, EV, 3) + self.C02_WAIT.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Serializing_Operation zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU issue-
pipeline was stalled due to serializing operations.
Instructions like CPUID; WRMSR or LFENCE serialize the out-
of-order execution which may limit performance."""
# TMA tree node (level 4, BE/Core): cycles stalled on PAUSE instructions.
# Note the sampling event (PAUSE_INST) differs from the cycle-counting
# event (CPU_CLK_UNHALTED.PAUSE) used in the formula.
class Slow_Pause:
    name = "Slow_Pause"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['CPU_CLK_UNHALTED.PAUSE_INST']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.PAUSE", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Slow_Pause zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to PAUSE Instructions."""
# TMA tree node (level 4, BE/Core): cycles spent in the C0.1 power-
# performance optimized state.
class C01_WAIT:
    name = "C01_WAIT"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['C0Wait'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.C01", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "C01_WAIT zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due staying in C0.1 power-performance optimized
state (Faster wakeup time; Smaller power savings)."""
# TMA tree node (level 4, BE/Core): cycles spent in the C0.2 power-
# performance optimized state.
class C02_WAIT:
    name = "C02_WAIT"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['C0Wait'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("CPU_CLK_UNHALTED.C02", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "C02_WAIT zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due staying in C0.2 power-performance optimized
state (Slower wakeup time; Larger power savings)."""
# TMA tree node (level 4, BE/Core): cycles stalled on LFENCE instructions,
# costed at 13 cycles per retired LFENCE.
class Memory_Fence:
    name = "Memory_Fence"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 13 * EV("MISC2_RETIRED.LFENCE", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Fence zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to LFENCE Instructions."""
# TMA tree node (level 3, BE/Core): core compute pressure not attributable
# to the divider; picks one of two formulas depending on whether the
# divider dominates total non-load stalls.
class Ports_Utilization:
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Core_Bound_Cycles(self, EV, 3) / CLKS(self, EV, 3) if (EV("ARITH.DIV_ACTIVE", 3)<(EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3) - EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3))) else Few_Uops_Executed_Threshold(self, EV, 3) / CLKS(self, EV, 3)
            # Bare EV() calls: presumably ensure all events of both branches
            # stay scheduled for collection regardless of which branch the
            # conditional takes (EV() registers events as a side effect) --
            # confirm against toplev's EV() semantics.
            EV("EXE_ACTIVITY.BOUND_ON_LOADS", 3)
            EV("CYCLE_ACTIVITY.STALLS_TOTAL", 3)
            EV("ARITH.DIV_ACTIVE", 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU performance
was potentially limited due to Core computation issues (non
divider-related). Two distinct categories can be attributed
into this metric: (1) heavy data-dependency among contiguous
instructions would manifest in this metric - such cases are
often referred to as low Instruction Level Parallelism
(ILP). (2) Contention on some hardware execution unit other
than Divider. For example; when there are too many multiply
operations.. Loop Vectorization -most compilers feature
auto-Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
# TMA tree node (level 4, BE/Core): cycles where no uops executed on any
# port.
class Ports_Utilized_0:
    name = "Ports_Utilized_0"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            # NOTE(review): the outer max(..., 1) floors the first factor at
            # 1, so val is at least the non-load stall fraction -- unusual
            # looking but kept as generated; confirm against the upstream
            # TMA model before changing.
            self.val = max((EV("EXE_ACTIVITY.EXE_BOUND_0_PORTS", 4) + max(EV("RS.EMPTY_RESOURCE", 4) - EV("RESOURCE_STALLS.SCOREBOARD", 4) , 0)) / CLKS(self, EV, 4) , 1) * (EV("CYCLE_ACTIVITY.STALLS_TOTAL", 4) - EV("EXE_ACTIVITY.BOUND_ON_LOADS", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_0 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed no
uops on any execution port (Logical Processor cycles since
ICL, Physical Core cycles otherwise). Long-latency
instructions like divides may contribute to this metric..
Check assembly view and Appendix C in Optimization Manual to
find out instructions with say 5 or more cycles latency..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
# TMA tree node (level 5, BE/Core): penalty from mixing SSE and AVX code,
# costed at 160 cycles per assist. Threshold is standalone (no parent gate).
class Mixing_Vectors:
    name = "Mixing_Vectors"
    domain = "Clocks"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = 160 * EV("ASSISTS.SSE_AVX_MIX", 5) / CLKS(self, EV, 5)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error(self, "Mixing_Vectors zero division")
        return self.val
    desc = """
This metric estimates penalty in terms of percentage of
cycles. Usually a Mixing_Vectors over 5% is worth
investigating. Read more in Appendix B1 of the Optimizations
Guide for this topic."""
# TMA tree node (level 4, BE/Core): cycles with exactly 1 uop executed
# across all ports.
class Ports_Utilized_1:
    name = "Ports_Utilized_1"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.1_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.1_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_1 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the CPU
executed total of 1 uop per cycle on all execution ports
(Logical Processor cycles since ICL, Physical Core cycles
otherwise). This can be due to heavy data-dependency among
software instructions; or over oversubscribing a particular
hardware resource. In some other cases with high
1_Port_Utilized and L1_Bound; this metric can point to L1
data-cache latency bottleneck that may not necessarily
manifest with complete execution starvation (due to the
short L1 latency e.g. walking a linked list) - looking at
the assembly can be helpful."""
# TMA tree node (level 4, BE/Core): cycles with exactly 2 uops executed
# across all ports.
class Ports_Utilized_2:
    name = "Ports_Utilized_2"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['EXE_ACTIVITY.2_PORTS_UTIL']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("EXE_ACTIVITY.2_PORTS_UTIL", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_2 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total
of 2 uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise).
Loop Vectorization -most compilers feature auto-
Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
# TMA tree node (level 4, BE/Core): cycles with 3 or more uops executed
# across all ports.
class Ports_Utilized_3m:
    name = "Ports_Utilized_3m"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = ['UOPS_EXECUTED.CYCLES_GE_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_EXECUTED.CYCLES_GE_3", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.4) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_3m zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total
of 3 or more uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise)."""
# TMA tree node (level 5, BE/Core): ALU dispatch utilization over the five
# ALU-capable ports (0, 1, 5/11 pair, 6); hence the 5x normalization.
class ALU_Op_Utilization:
    name = "ALU_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_0", 5) + EV("UOPS_DISPATCHED.PORT_1", 5) + EV("UOPS_DISPATCHED.PORT_5_11", 5) + EV("UOPS_DISPATCHED.PORT_6", 5)) / (5 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.4)
        except ZeroDivisionError:
            handle_error(self, "ALU_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution ports for ALU operations."""
# TMA tree node (level 6, BE/Core): dispatch utilization of port 0
# (ALU / 2nd branch).
class Port_0:
    name = "Port_0"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_0']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_0", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_0 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 0 ALU and 2nd branch"""
# TMA tree node (level 6, BE/Core): dispatch utilization of port 1 (ALU).
class Port_1:
    name = "Port_1"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_1']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_1", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_1 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 1 (ALU)"""
# TMA tree node (level 6, BE/Core): dispatch utilization of port 6
# (primary branch / simple ALU).
class Port_6:
    name = "Port_6"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_6']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_6", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_6 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 6 Primary Branch and
simple ALU"""
# TMA tree node (level 5, BE/Core): load-port utilization over the three
# load ports (2, 3, 10); hence the 3x normalization.
class Load_Op_Utilization:
    name = "Load_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_2_3_10']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_DISPATCHED.PORT_2_3_10", 5) / (3 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Load_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Load operations"""
# TMA tree node (level 5, BE/Core): store-port utilization over the four
# store-related ports (4, 9, 7, 8); hence the 4x normalization.
class Store_Op_Utilization:
    name = "Store_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = ['UOPS_DISPATCHED.PORT_7_8']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("UOPS_DISPATCHED.PORT_4_9", 5) + EV("UOPS_DISPATCHED.PORT_7_8", 5)) / (4 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Store_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Store operations"""
# TMA top-level category (level 1, RET): slots that retired useful work.
class Retiring:
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            # Prefer the fixed-counter PERF_METRICS MSR path when
            # topdown_use_fixed is set; otherwise fall back to the generic
            # UOPS_RETIRED.SLOTS formula.
            self.val = (EV("PERF_METRICS.RETIRING", 1) / EV("TOPDOWN.SLOTS", 1)) / PERF_METRICS_SUM(self, EV, 1) if topdown_use_fixed else EV("UOPS_RETIRED.SLOTS", 1) / SLOTS(self, EV, 1)
            # Also flagged whenever Heavy_Operations is flagged, so the
            # hidden-cost cases described in 'desc' surface.
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """
This category represents fraction of slots utilized by
useful work i.e. issued uops that eventually get retired.
Ideally; all pipeline slots would be attributed to the
Retiring category. Retiring of 100% would indicate the
maximum Pipeline_Width throughput was achieved. Maximizing
Retiring typically increases the Instructions-per-cycle (see
IPC metric). Note that a high Retiring value does not
necessary mean there is no room for more performance. For
example; Heavy-operations or Microcode Assists are
categorized under Retiring. They often indicate suboptimal
performance and can often be optimized or avoided. . A high
Retiring value for non-vectorized code may be a good hint
for programmer to consider vectorizing his code. Doing so
essentially lets more computations be done without
significantly increasing number of instructions thus
improving the performance."""
# TMA tree node (level 2, RET): retired single-uop ("light") operations,
# derived as Retiring minus Heavy_Operations (clamped at 0).
class Light_Operations:
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring light-weight operations -- instructions that
require no more than one uop (micro-operation). This
correlates with total number of instructions used by the
program. A uops-per-instruction (see UopPI metric) ratio of
1 or less should be expected for decently optimized code
running on Intel Core/Xeon products. While this often
indicates efficient X86 instructions were executed; high
value does not necessarily mean better performance cannot be
achieved. .. Focus on techniques that reduce instruction
count or result in more efficient instructions generation
such as vectorization."""
# TMA tree node (level 3, RET): overall FP arithmetic fraction, summed from
# the X87_Use, FP_Scalar and FP_Vector child nodes.
class FP_Arith:
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """
This metric represents overall arithmetic floating-point
(FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use of
\"Uops\" CountDomain and FMA double-counting."""
# TMA tree node (level 4, RET): approximation of legacy x87 usage as the
# x87 share of executed uops scaled by the Retiring fraction.
class X87_Use:
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Retiring.compute(EV) * EV("UOPS_EXECUTED.X87", 4) / EV("UOPS_EXECUTED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """
This metric serves as an approximation of legacy x87 usage.
It accounts for instructions beyond X87 FP arithmetic
operations; hence may be used as a thermometer to avoid X87
high usage and preferably upgrade to modern ISA. See Tip
under Tuning Hint.. Tip: consider compiler flags to generate
newer AVX (or SSE) instruction sets; which typically perform
better and feature vectors."""
# TMA tree node (level 4, RET): scalar FP arithmetic uops fraction;
# FP_Arith_Scalar / Retired_Slots are helpers defined elsewhere in the file.
class FP_Scalar:
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
scalar uops fraction the CPU has retired. May overcount due
to FMA double counting.. Investigate what limits (compiler)
generation of vector code."""
# TMA tree node (level 4, RET): vector FP arithmetic uops fraction across
# all vector widths (helper FP_Arith_Vector defined elsewhere in the file).
class FP_Vector:
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
vector uops fraction the CPU has retired aggregated across
all vector widths. May overcount due to FMA double
counting.. Check if vector width is expected"""
# TMA tree node (level 5, RET): 128-bit packed FP (double + single) uops
# fraction of retired slots.
class FP_Vector_128b:
    name = "FP_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_128b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 128-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
# TMA tree node (level 5, RET): 256-bit packed FP (double + single) uops
# fraction of retired slots.
class FP_Vector_256b:
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
# TMA tree node (level 3, RET): integer vector operations fraction, summed
# from the Int_Vector_128b and Int_Vector_256b child nodes.
class Int_Operations:
    name = "Int_Operations"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Int_Vector_128b.compute(EV) + self.Int_Vector_256b.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Int_Operations zero division")
        return self.val
    desc = """
This metric represents overall Integer (Int) select
operations fraction the CPU has executed (retired).
Vector/Matrix Int operations and shuffles are counted. Note
this metric's value may exceed its parent due to use of
\"Uops\" CountDomain."""
# TMA tree node (level 4, RET): 128-bit integer ADD/VNNI vector uops
# fraction of retired slots.
class Int_Vector_128b:
    name = "Int_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'IntVector', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("INT_VEC_RETIRED.ADD_128", 4) + EV("INT_VEC_RETIRED.VNNI_128", 4)) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Int_Vector_128b zero division")
        return self.val
    desc = """
This metric represents 128-bit vector Integer ADD/SUB/SAD or
VNNI (Vector Neural Network Instructions) uops fraction the
CPU has retired."""
# TMA tree node (level 4, RET): 256-bit integer ADD/MUL/VNNI vector uops
# fraction of retired slots.
class Int_Vector_256b:
    name = "Int_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'IntVector', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = (EV("INT_VEC_RETIRED.ADD_256", 4) + EV("INT_VEC_RETIRED.MUL_256", 4) + EV("INT_VEC_RETIRED.VNNI_256", 4)) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Int_Vector_256b zero division")
        return self.val
    desc = """
This metric represents 256-bit vector Integer
ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions)
uops fraction the CPU has retired."""
# TMA tree node (level 3, RET): memory (load/store) share of retired light
# operations.
class Memory_Operations:
    name = "Memory_Operations"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("MEM_UOP_RETIRED.ANY", 3) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring memory operations -- uops for memory load or store
accesses."""
# TMA tree node (level 3, RET): macro-fused instruction share of retired
# light operations.
class Fused_Instructions:
    name = "Fused_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.MACRO_FUSED", 3) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fused_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring fused instructions -- where one uop can represent
multiple contiguous instructions. CMP+JCC or DEC+JCC are
common examples of legacy fusions. { Note new MOV+OP and
Load+OP fusions appear under Other_Light_Ops in MTL!}. See
section 'Optimizing for Macro-fusion' in Optimization
Manual:"""
# TMA tree node (level 3, RET): retired branches that were not macro-fused
# (all branches minus fused), scaled by the light-operations fraction.
class Non_Fused_Branches:
    name = "Non_Fused_Branches"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Branches', 'BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * (EV("BR_INST_RETIRED.ALL_BRANCHES", 3) - EV("INST_RETIRED.MACRO_FUSED", 3)) / Retired_Slots(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Non_Fused_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring branch instructions that were not fused. Non-
conditional branches like direct JMP or CALL would count
here. Can be used to examine fusible conditional jumps that
were not fused."""
# TMA tree node (level 3, RET): remainder of light operations not covered
# by sibling nodes (helper Light_Ops_Sum defined elsewhere in the file).
class Other_Light_Ops:
    name = "Other_Light_Ops"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            # Clamped at 0 because sibling sums may overcount (see 'desc').
            self.val = max(0 , self.Light_Operations.compute(EV) - Light_Ops_Sum(self, EV, 3))
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Other_Light_Ops zero division")
        return self.val
    desc = """
This metric represents the remaining light uops fraction the
CPU has executed - remaining means not covered by other
sibling nodes. May undercount due to FMA double counting"""
# TMA tree node (level 4, RET): NOP share of retired light operations.
class Nop_Instructions:
    name = "Nop_Instructions"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = ['INST_RETIRED.NOP']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvBO', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INST_RETIRED.NOP", 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Nop_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring NOP (no op) instructions. Compilers often use NOPs
for certain address alignments - e.g. start address of a
function or loop body.. Improve Codegen by correctly placing
NOPs outside hot sections (e.g. outside loop body)."""
# TMA tree node (level 4, RET): 256-bit shuffle share of retired light
# operations.
class Shuffles_256b:
    name = "Shuffles_256b"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC', 'Pipeline'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Light_Operations.compute(EV) * EV("INT_VEC_RETIRED.SHUFFLES", 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Shuffles_256b zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring Shuffle operations of 256-bit vector size (FP or
Integer). Shuffles may incur slow cross \"vector lane\" data
transfers."""
# TMA tree node (level 2, RET): retired multi-uop / microcoded operations.
class Heavy_Operations:
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['UOPS_RETIRED.HEAVY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # Fixed-counter PERF_METRICS MSR path when topdown_use_fixed is
            # set, otherwise the generic UOPS_RETIRED.HEAVY formula.
            self.val = (EV("PERF_METRICS.HEAVY_OPERATIONS", 2) / EV("TOPDOWN.SLOTS", 2)) / PERF_METRICS_SUM(self, EV, 2) if topdown_use_fixed else EV("UOPS_RETIRED.HEAVY", 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that
require two or more uops or micro-coded sequences. This
highly-correlates with the uop length of these
instructions/sequences. ."""
# TMA tree node (level 3, RET): multi-uop decoded instructions, derived as
# Heavy_Operations minus Microcode_Sequencer (clamped at 0).
class Few_Uops_Instructions:
    name = "Few_Uops_Instructions"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Heavy_Operations.compute(EV) - self.Microcode_Sequencer.compute(EV))
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Few_Uops_Instructions zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring instructions that that are decoder into two or up
to five uops. This highly-correlates with the number of
uops in such instructions."""
# TMA tree node (level 3, RET): slots retiring uops fetched from the
# Microcode Sequencer (MS).
class Microcode_Sequencer:
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = ['UOPS_RETIRED.MS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("UOPS_RETIRED.MS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was
retiring uops fetched by the Microcode Sequencer (MS) unit.
The MS is used for CISC instructions not supported by the
default decoders (like repeat move strings; or CPUID); or by
microcode assists used to address some operation modes (like
in Floating Point assists). These cases can often be
avoided.."""
# TMA tree node (level 4, RET): estimated slot cost of microcode assists;
# Avg_Assist_Cost is a constant defined elsewhere in this generated file.
class Assists:
    name = "Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 4
    htoff = False
    sample = ['ASSISTS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = Avg_Assist_Cost * EV("ASSISTS.ANY", 4) / SLOTS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops
delivered by the Microcode_Sequencer as a result of Assists.
Assists are long sequences of uops that are required in
certain corner-cases for operations that cannot be handled
natively by the execution pipeline. For example; when
working with very small floating point values (so-called
Denormals); the FP units are not set up to perform these
operations natively. Instead; a sequence of instructions to
perform the computation on the Denormals is injected into
the pipeline. Since these microcode sequences might be
dozens of uops long; Assists can be extremely deleterious to
performance and they can be avoided in many cases."""
# TMA tree node (level 5, RET): estimated slot cost of page-fault assists,
# costed at 99 slots per assist.
class Page_Faults:
    name = "Page_Faults"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = 99 * EV("ASSISTS.PAGE_FAULT", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error(self, "Page_Faults zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of slots the CPU
retired uops as a result of handing Page Faults. A Page
Fault may apply on first application access to a memory
page. Note operating system handling of page faults accounts
for the majority of its cost."""
# TMA tree node (level 5, RET area): estimated slot cost of floating-point assists.
class FP_Assists:
    name = "FP_Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            # 30 is presumably the TMA model's per-event slot-cost weight — confirm against the generator.
            self.val = 30 * EV("ASSISTS.FP", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "FP_Assists zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of slots the CPU
retired uops as a result of handing Floating Point (FP)
Assists. FP Assist may apply when working with very small
floating point values (so-called Denormals).. Consider DAZ
(Denormals Are Zero) and/or FTZ (Flush To Zero) options in
your compiler; \"-ffast-math\" with -O2 in GCC for example.
This option may improve performance if the denormal values
are not critical in your application. Also note that the DAZ
and FTZ modes are not compatible with the IEEE Standard
754.. https://www.intel.com/content/www/us/en/develop/docume
ntation/vtune-help/top/reference/cpu-metrics-reference/bad-
speculation-back-end-bound-pipeline-slots/fp-assists.html"""
# TMA tree node (level 5, RET area): estimated slot cost of SSE<->AVX transition assists.
class AVX_Assists:
    name = "AVX_Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            # 63 is presumably the TMA model's per-event slot-cost weight — confirm against the generator.
            self.val = 63 * EV("ASSISTS.SSE_AVX_MIX", 5) / SLOTS(self, EV, 5)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "AVX_Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops
as a result of handing SSE to AVX* or AVX* to SSE transition
Assists."""
# TMA tree node (level 4, RET area): CISC slots = Microcode_Sequencer minus Assists,
# clamped at zero so the residual can never go negative.
class CISC:
    name = "CISC"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = ['FRONTEND_RETIRED.MS_FLOWS']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "CISC zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU retired
uops originated from CISC (complex instruction set computer)
instruction. A CISC instruction has multiple uops that are
required to perform the instruction's functionality as in
the case of read-modify-write as an example. Since these
instructions require multiple uops they may or may not imply
sub-optimal use of machine resources."""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Mispredictions() formula.
class Metric_Mispredictions:
    name = "Mispredictions"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts', 'BvMP'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Mispredictions(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Mispredictions zero division")
    desc = """
Total pipeline cost of Branch Misprediction related
bottlenecks"""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Big_Code() formula.
class Metric_Big_Code:
    name = "Big_Code"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBC', 'BigFootprint', 'Fed', 'Frontend', 'IcMiss', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Big_Code(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Big_Code zero division")
    desc = """
Total pipeline cost of instruction fetch related bottlenecks
by large code footprint programs (i-side cache; TLB and BTB
misses)"""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Instruction_Fetch_BW() formula.
class Metric_Instruction_Fetch_BW:
    name = "Instruction_Fetch_BW"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvFB', 'Fed', 'FetchBW', 'Frontend'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instruction_Fetch_BW(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Instruction_Fetch_BW zero division")
    desc = """
Total pipeline cost of instruction fetch bandwidth related
bottlenecks (when the front-end could not sustain operations
delivery to the back-end)"""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Cache_Memory_Bandwidth() formula.
class Metric_Cache_Memory_Bandwidth:
    name = "Cache_Memory_Bandwidth"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMB', 'Mem', 'MemoryBW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cache_Memory_Bandwidth(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Bandwidth zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Bandwidth
related bottlenecks"""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Cache_Memory_Latency() formula.
class Metric_Cache_Memory_Latency:
    name = "Cache_Memory_Latency"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvML', 'Mem', 'MemoryLat', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cache_Memory_Latency(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Cache_Memory_Latency zero division")
    desc = """
Total pipeline cost of external Memory- or Cache-Latency
related bottlenecks"""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Memory_Data_TLBs() formula.
class Metric_Memory_Data_TLBs:
    name = "Memory_Data_TLBs"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMT', 'Mem', 'MemoryTLB', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Memory_Data_TLBs(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Data_TLBs zero division")
    desc = """
Total pipeline cost of Memory Address Translation related
bottlenecks (data-side TLBs)"""
# Info.Bottleneck metric (Scaled_Slots): wraps Memory_Synchronization(); note lower
# threshold (10) than most sibling bottleneck metrics (20).
class Metric_Memory_Synchronization:
    name = "Memory_Synchronization"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvMS', 'Mem', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Memory_Synchronization(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Memory_Synchronization zero division")
    desc = """
Total pipeline cost of Memory Synchronization related
bottlenecks (data transfers and coherency updates across
processors)"""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Compute_Bound_Est() formula.
class Metric_Compute_Bound_Est:
    name = "Compute_Bound_Est"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvCB', 'Cor'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Compute_Bound_Est(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Compute_Bound_Est zero division")
    desc = """
Total pipeline cost when the execution is compute-bound - an
estimation. Covers Core Bound when High ILP as well as when
long-latency execution units are busy."""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Irregular_Overhead() formula.
class Metric_Irregular_Overhead:
    name = "Irregular_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['Bad', 'BvIO', 'Cor', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Irregular_Overhead(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "Irregular_Overhead zero division")
    desc = """
Total pipeline cost of irregular execution (e.g. FP-assists
in HPC, Wait time with work imbalance multithreaded
workloads, overhead in system services or virtualized
environments)"""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Other_Bottlenecks() formula.
class Metric_Other_Bottlenecks:
    name = "Other_Bottlenecks"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvOB', 'Cor', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Other_Bottlenecks(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Other_Bottlenecks zero division")
    desc = """
Total pipeline cost of remaining bottlenecks in the back-
end. Examples include data-dependencies (Core Bound when Low
ILP) and other unlisted memory-related stalls."""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Branching_Overhead() formula.
class Metric_Branching_Overhead:
    name = "Branching_Overhead"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvBO', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Branching_Overhead(self, EV, 0)
            self.thresh = (self.val > 5)
        except ZeroDivisionError:
            handle_error_metric(self, "Branching_Overhead zero division")
    desc = """
Total pipeline cost of instructions used for program
control-flow - a subset of the Retiring category in TMA.
Examples include function calls; loops and alignments. (A
lower bound). Consider Loop Unrolling or function inlining
optimizations"""
# Info.Bottleneck metric (Scaled_Slots): wraps the module-level Useful_Work() formula.
class Metric_Useful_Work:
    name = "Useful_Work"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Bottleneck"
    metricgroup = frozenset(['BvUW', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Useful_Work(self, EV, 0)
            self.thresh = (self.val > 20)
        except ZeroDivisionError:
            handle_error_metric(self, "Useful_Work zero division")
    desc = """
Total pipeline cost of \"useful operations\" - the portion
of Retiring category not covered by Branching_Overhead nor
Irregular_Overhead."""
# Info.Botlnk.L0 metric (probability 0..1): wraps the module-level Core_Bound_Likely() formula.
class Metric_Core_Bound_Likely:
    name = "Core_Bound_Likely"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Botlnk.L0"
    metricgroup = frozenset(['Cor', 'SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Core_Bound_Likely(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Bound_Likely zero division")
    desc = """
Probability of Core Bound bottleneck hidden by SMT-profiling
artifacts. Tip: consider analysis with SMT disabled"""
# Info.Thread metric: instructions per cycle; always shown (thresh = True).
class Metric_IPC:
    name = "IPC"
    domain = "Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Ret', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IPC zero division")
    desc = """
Instructions Per Cycle (per Logical Processor)"""
# Info.Thread metric: uops per instruction; flagged when above 1.05.
class Metric_UopPI:
    name = "UopPI"
    domain = "Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UopPI(self, EV, 0)
            self.thresh = (self.val > 1.05)
        except ZeroDivisionError:
            handle_error_metric(self, "UopPI zero division")
    desc = """
Uops Per Instruction"""
# Info.Thread metric: uops per taken branch; flagged when below 1.5x the pipeline width.
class Metric_UpTB:
    name = "UpTB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 1.5
        except ZeroDivisionError:
            handle_error_metric(self, "UpTB zero division")
    desc = """
Uops per taken branch"""
# Info.Thread metric: cycles per instruction; always shown (thresh = True).
class Metric_CPI:
    name = "CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPI zero division")
    desc = """
Cycles Per Instruction (per Logical Processor)"""
# Info.Thread count: per-logical-processor active clocks.
class Metric_CLKS:
    name = "CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CLKS zero division")
    desc = """
Per-Logical Processor actual clocks when the Logical
Processor is active."""
# Info.Thread count: total issue-pipeline slots (denominator for slot-domain nodes).
class Metric_SLOTS:
    name = "SLOTS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SLOTS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SLOTS zero division")
    desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""
# Info.Thread metric (fraction 0..1): issue-slot share used by this logical processor.
class Metric_Slots_Utilization:
    name = "Slots_Utilization"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Slots_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Slots_Utilization zero division")
    desc = """
Fraction of Physical Core issue-slots utilized by this
Logical Processor"""
# Info.Thread metric: ratio of executed to issued uops.
class Metric_Execute_per_Issue:
    name = "Execute_per_Issue"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Cor', 'Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute_per_Issue(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute_per_Issue zero division")
    desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests
high rate of uop micro-fusions. Ratio < 1 suggest high rate
of \"execute\" at rename stage."""
# Info.Core metric: IPC across hyper-threads of one physical core.
class Metric_CoreIPC:
    name = "CoreIPC"
    domain = "Core_Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CoreIPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CoreIPC zero division")
    desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""
# Info.Core metric: floating-point operations per cycle.
class Metric_FLOPc:
    name = "FLOPc"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'Flops'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FLOPc(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FLOPc zero division")
    desc = """
Floating Point Operations Per Cycle"""
# Info.Core metric: per-core utilization of non-x87 FP execution units (can exceed 1 with FMA).
class Metric_FP_Arith_Utilization:
    name = "FP_Arith_Utilization"
    domain = "Core_Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FP_Arith_Utilization zero division")
    desc = """
Actual per-core usage of the Floating Point non-X87
execution units (regardless of precision or vector-width).
Values > 1 are possible due to Fused-Multiply Add use all
of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less
common."""
# Info.Core metric: instruction-level parallelism per thread; capped at the Exe_Ports constant.
class Metric_ILP:
    name = "ILP"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = ILP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "ILP zero division")
    desc = """
Instruction-Level-Parallelism (average number of uops
executed when there is execution) per thread (logical-
processor)"""
# Info.Core metric: uops executed per cycle.
class Metric_EPC:
    name = "EPC"
    domain = "Metric"
    maxval = 20.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = EPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "EPC zero division")
    desc = """
uops Executed per Cycle"""
# Info.Core count: physical-core clocks while any logical processor is active.
class Metric_CORE_CLKS:
    name = "CORE_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CORE_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CORE_CLKS zero division")
    desc = """
Core actual clocks when any Logical Processor is active on
the Physical Core"""
# Info.Inst_Mix metric: instructions per load; flagged when loads are frequent (< 3).
class Metric_IpLoad:
    name = "IpLoad"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpLoad(self, EV, 0)
            self.thresh = (self.val < 3)
        except ZeroDivisionError:
            handle_error_metric(self, "IpLoad zero division")
    desc = """
Instructions per Load (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
# Info.Inst_Mix metric: instructions per store; flagged when stores are frequent (< 8).
class Metric_IpStore:
    name = "IpStore"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpStore(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpStore zero division")
    desc = """
Instructions per Store (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
# Info.Inst_Mix metric: instructions per branch; flagged when branches are frequent (< 8).
class Metric_IpBranch:
    name = "IpBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpBranch(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpBranch zero division")
    desc = """
Instructions per Branch (lower number means higher
occurrence rate)"""
# Info.Inst_Mix metric: instructions per near call; flagged when calls are frequent (< 200).
class Metric_IpCall:
    name = "IpCall"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpCall(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpCall zero division")
    desc = """
Instructions per (near) call (lower number means higher
occurrence rate)"""
# Info.Inst_Mix metric: instructions per taken branch; flagged below 2*width + 1.
class Metric_IpTB:
    name = "IpTB"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 2 + 1
        except ZeroDivisionError:
            handle_error_metric(self, "IpTB zero division")
    desc = """
Instructions per taken branch"""
# Info.Inst_Mix metric: branch instructions per taken branch; always shown.
class Metric_BpTkBranch:
    name = "BpTkBranch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = BpTkBranch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "BpTkBranch zero division")
    desc = """
Branch instructions per taken branch. . Can be used to
approximate PGO-likelihood for non-loopy codes."""
# Info.Inst_Mix metric: instructions per FP operation; flagged when FLOPs are frequent (< 10).
class Metric_IpFLOP:
    name = "IpFLOP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpFLOP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFLOP zero division")
    desc = """
Instructions per Floating Point (FP) Operation (lower number
means higher occurrence rate). Reference: Tuning Performance
via Metrics with Expectations.
https://doi.org/10.1109/LCA.2019.2916408"""
# Info.Inst_Mix metric: instructions per FP arithmetic instruction.
class Metric_IpArith:
    name = "IpArith"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith zero division")
    desc = """
Instructions per FP Arithmetic instruction (lower number
means higher occurrence rate). Values < 1 are possible due
to intentional FMA double counting. Approximated prior to
BDW."""
# Info.Inst_Mix metric: instructions per scalar single-precision FP instruction.
class Metric_IpArith_Scalar_SP:
    name = "IpArith_Scalar_SP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_SP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_SP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Single-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
# Info.Inst_Mix metric: instructions per scalar double-precision FP instruction.
class Metric_IpArith_Scalar_DP:
    name = "IpArith_Scalar_DP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_DP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_DP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Double-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
# Info.Inst_Mix metric: instructions per 128-bit AVX/SSE FP arithmetic instruction.
class Metric_IpArith_AVX128:
    name = "IpArith_AVX128"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX128(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX128 zero division")
    desc = """
Instructions per FP Arithmetic AVX/SSE 128-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
# Info.Inst_Mix metric: instructions per 256-bit AVX* FP arithmetic instruction.
class Metric_IpArith_AVX256:
    name = "IpArith_AVX256"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX256(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX256 zero division")
    desc = """
Instructions per FP Arithmetic AVX* 256-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
# Info.Inst_Mix metric: instructions per PAUSE instruction.
# NOTE(review): metricgroup ('Flops', 'FpVector', 'InsType') looks copy-pasted from the
# FP metrics above — PAUSE is not an FP instruction; confirm against the generator/TMA spec.
class Metric_IpPause:
    name = "IpPause"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpPause(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IpPause zero division")
    desc = """
Instructions per PAUSE (lower number means higher occurrence
rate)"""
# Info.Inst_Mix metric: instructions per software prefetch; flagged when frequent (< 100).
class Metric_IpSWPF:
    name = "IpSWPF"
    domain = "Inst_Metric"
    maxval = 1000
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Prefetches'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpSWPF(self, EV, 0)
            self.thresh = (self.val < 100)
        except ZeroDivisionError:
            handle_error_metric(self, "IpSWPF zero division")
    desc = """
Instructions per Software prefetch instruction (of any type:
NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence
rate)"""
# Info.Inst_Mix count: total retired instructions.
class Metric_Instructions:
    name = "Instructions"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Summary', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instructions(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Instructions zero division")
    desc = """
Total number of retired Instructions"""
# Info.Pipeline metric: average uops retired per retirement cycle.
class Metric_Retire:
    name = "Retire"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Pipeline', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Retire(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Retire zero division")
    desc = """
Average number of Uops retired in cycles where at least one
uop has retired."""
# Info.Pipeline metric: estimated retirement-cycle fraction spent on string (repeat) instructions.
class Metric_Strings_Cycles:
    name = "Strings_Cycles"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Strings_Cycles(self, EV, 0)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error_metric(self, "Strings_Cycles zero division")
    desc = """
Estimated fraction of retirement-cycles dealing with repeat
instructions"""
# Info.Pipeline metric: instructions per microcode-assist invocation.
class Metric_IpAssist:
    name = "IpAssist"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['MicroSeq', 'Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpAssist(self, EV, 0)
            self.thresh = (self.val < 100000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpAssist zero division")
    desc = """
Instructions per a microcode Assist invocation. See Assists
tree node for details (lower number means higher occurrence
rate)"""
# Info.Pipeline metric: wraps the module-level Execute() formula; capped at Exe_Ports.
# NOTE(review): the generated desc string is empty — likely a gap in the upstream model data.
class Metric_Execute:
    name = "Execute"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute zero division")
    desc = """
"""
# Info.Pipeline metric: average uops fetched from the LSD per cycle.
class Metric_Fetch_LSD:
    name = "Fetch_LSD"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_LSD(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_LSD zero division")
    desc = """
Average number of uops fetched from LSD per cycle"""
# Info.Pipeline metric: average uops fetched from the DSB (uop cache) per cycle.
class Metric_Fetch_DSB:
    name = "Fetch_DSB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_DSB(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_DSB zero division")
    desc = """
Average number of uops fetched from DSB per cycle"""
# Info.Pipeline metric: average uops fetched from the MITE (legacy decode) per cycle.
class Metric_Fetch_MITE:
    name = "Fetch_MITE"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_MITE(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_MITE zero division")
    desc = """
Average number of uops fetched from MITE per cycle"""
# Info.Frontend metric: average uops issued per issuing cycle.
class Metric_Fetch_UpC:
    name = "Fetch_UpC"
    domain = "Metric"
    maxval = 6.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Fetch_UpC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Fetch_UpC zero division")
    desc = """
Average number of Uops issued by front-end when it issued
something"""
# Info.Frontend metric (fraction 0..1): share of uops delivered by the LSD.
class Metric_LSD_Coverage:
    name = "LSD_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed', 'LSD'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = LSD_Coverage(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "LSD_Coverage zero division")
    desc = """
Fraction of Uops delivered by the LSD (Loop Stream Detector;
aka Loop Cache)"""
# Info.Frontend metric (fraction 0..1): share of uops delivered by the DSB;
# flagged only when coverage is low (< 0.7) while IPC is high (HighIPC helper).
class Metric_DSB_Coverage:
    name = "DSB_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSB', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Coverage(self, EV, 0)
            self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Coverage zero division")
    desc = """
Fraction of Uops delivered by the DSB (aka Decoded ICache;
or Uop Cache). See section 'Decoded ICache' in Optimization
Manual. http://www.intel.com/content/www/us/en/architecture-
and-technology/64-ia-32-architectures-optimization-
manual.html"""
# Info.Frontend metric: average front-end delay cycles per unknown-branch detection.
class Metric_Unknown_Branch_Cost:
    name = "Unknown_Branch_Cost"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Unknown_Branch_Cost(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Unknown_Branch_Cost zero division")
    desc = """
Average number of cycles the front-end was delayed due to an
Unknown Branch detection. See Unknown_Branches node."""
# Info.Frontend metric: average cycles per DSB-to-MITE fetch-unit switch.
class Metric_DSB_Switch_Cost:
    name = "DSB_Switch_Cost"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSBmiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Switch_Cost(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Switch_Cost zero division")
    desc = """
Average number of cycles of a switch from the DSB fetch-unit
to MITE fetch unit - see DSB_Switches tree node for details."""
# Info.Botlnk.L2 metric (Scaled_Slots): pipeline cost of DSB misses.
class Metric_DSB_Misses:
    name = "DSB_Misses"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Botlnk.L2"
    metricgroup = frozenset(['DSBmiss', 'Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Misses(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Misses zero division")
    desc = """
Total pipeline cost of DSB (uop cache) misses - subset of
the Instruction_Fetch_BW Bottleneck."""
# Info.Botlnk.L2 metric (Scaled_Slots): pipeline cost of DSB hits (bandwidth-limited fetch).
class Metric_DSB_Bandwidth:
    name = "DSB_Bandwidth"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Botlnk.L2"
    metricgroup = frozenset(['DSB', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Bandwidth(self, EV, 0)
            self.thresh = (self.val > 10)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Bandwidth zero division")
    desc = """
Total pipeline cost of DSB (uop cache) hits - subset of the
Instruction_Fetch_BW Bottleneck."""
# Info.Frontend metric: average latency of L1 instruction-cache misses.
class Metric_ICache_Miss_Latency:
    name = "ICache_Miss_Latency"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = ICache_Miss_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "ICache_Miss_Latency zero division")
    desc = """
Average Latency for L1 instruction cache misses"""
# Info.Botlnk.L2 metric (Scaled_Slots): pipeline cost of instruction-cache misses.
class Metric_IC_Misses:
    name = "IC_Misses"
    domain = "Scaled_Slots"
    maxval = 0
    errcount = 0
    area = "Info.Botlnk.L2"
    metricgroup = frozenset(['Fed', 'FetchLat', 'IcMiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IC_Misses(self, EV, 0)
            self.thresh = (self.val > 5)
        except ZeroDivisionError:
            handle_error_metric(self, "IC_Misses zero division")
    desc = """
Total pipeline cost of Instruction Cache misses - subset of
the Big_Code Bottleneck."""
# Info.Frontend metric: instructions per retired DSB miss; flagged when frequent (< 50).
class Metric_IpDSB_Miss_Ret:
    name = "IpDSB_Miss_Ret"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSBmiss', 'Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpDSB_Miss_Ret(self, EV, 0)
            self.thresh = (self.val < 50)
        except ZeroDivisionError:
            handle_error_metric(self, "IpDSB_Miss_Ret zero division")
    desc = """
Instructions per non-speculative DSB miss (lower number
means higher occurrence rate)"""
# Info.Frontend metric: instructions per speculative unknown branch (BAClear).
class Metric_IpUnknown_Branch:
    name = "IpUnknown_Branch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpUnknown_Branch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IpUnknown_Branch zero division")
    desc = """
Instructions per speculative Unknown Branch Misprediction
(BAClear) (lower number means higher occurrence rate)"""
# Info.Frontend metric: L2 true code-line misses per kilo instruction.
class Metric_L2MPKI_Code:
    name = "L2MPKI_Code"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['IcMiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_Code(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_Code zero division")
    desc = """
L2 cache true code cacheline misses per kilo instruction"""
# Info.Frontend metric: L2 speculative code-line misses per kilo instruction.
class Metric_L2MPKI_Code_All:
    name = "L2MPKI_Code_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['IcMiss'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_Code_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_Code_All zero division")
    desc = """
L2 cache speculative code cacheline misses per kilo
instruction"""
# Info.Bad_Spec: instructions per non-speculative branch misprediction (JEClear).
class Metric_IpMispredict:
    name = "IpMispredict"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMispredict(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMispredict zero division")
    desc = """
        Number of Instructions per non-speculative Branch
        Misprediction (JEClear) (lower number means higher
        occurrence rate)"""
# Info.Bad_Spec: instructions per mispredicted conditional non-taken branch.
class Metric_IpMisp_Cond_Ntaken:
    name = "IpMisp_Cond_Ntaken"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMisp_Cond_Ntaken(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMisp_Cond_Ntaken zero division")
    desc = """
        Instructions per retired Mispredicts for conditional non-
        taken branches (lower number means higher occurrence rate)."""
# Info.Bad_Spec: instructions per mispredicted conditional taken branch.
class Metric_IpMisp_Cond_Taken:
    name = "IpMisp_Cond_Taken"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMisp_Cond_Taken(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMisp_Cond_Taken zero division")
    desc = """
        Instructions per retired Mispredicts for conditional taken
        branches (lower number means higher occurrence rate)."""
# Info.Bad_Spec: instructions per mispredicted return branch.
class Metric_IpMisp_Ret:
    name = "IpMisp_Ret"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMisp_Ret(self, EV, 0)
            self.thresh = (self.val < 500)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMisp_Ret zero division")
    desc = """
        Instructions per retired Mispredicts for return branches
        (lower number means higher occurrence rate)."""
# Info.Bad_Spec: instructions per mispredicted indirect CALL/JMP.
class Metric_IpMisp_Indirect:
    name = "IpMisp_Indirect"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMisp_Indirect(self, EV, 0)
            self.thresh = (self.val < 1000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMisp_Indirect zero division")
    desc = """
        Instructions per retired Mispredicts for indirect CALL or
        JMP branches (lower number means higher occurrence rate)."""
# Info.Bad_Spec: TMA slots wasted per retired branch misprediction.
class Metric_Branch_Misprediction_Cost:
    name = "Branch_Misprediction_Cost"
    domain = "Core_Metric"
    maxval = 300
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Branch_Misprediction_Cost(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Branch_Misprediction_Cost zero division")
    desc = """
        Branch Misprediction Cost: Fraction of TMA slots wasted per
        non-speculative branch misprediction (retired JEClear)"""
# Info.Bad_Spec: ratio of speculative to retired clears.
class Metric_Spec_Clears_Ratio:
    name = "Spec_Clears_Ratio"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Spec_Clears_Ratio(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Spec_Clears_Ratio zero division")
    desc = """
        Speculative to Retired ratio of all clears (covering
        Mispredicts and nukes)"""
# Info.Branches: fraction of branches that are non-taken conditionals.
class Metric_Cond_NT:
    name = "Cond_NT"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cond_NT(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Cond_NT zero division")
    desc = """
        Fraction of branches that are non-taken conditionals"""
# Info.Branches: fraction of branches that are taken conditionals.
class Metric_Cond_TK:
    name = "Cond_TK"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches', 'CodeGen', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Cond_TK(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Cond_TK zero division")
    desc = """
        Fraction of branches that are taken conditionals"""
# Info.Branches: fraction of branches that are CALL or RET.
class Metric_CallRet:
    name = "CallRet"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CallRet(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CallRet zero division")
    desc = """
        Fraction of branches that are CALL or RET"""
# Info.Branches: fraction of branches that are unconditional jumps.
class Metric_Jump:
    name = "Jump"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Jump(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Jump zero division")
    desc = """
        Fraction of branches that are unconditional (direct or
        indirect) jumps"""
# Info.Branches: fraction of branches of other (uncategorized) types.
class Metric_Other_Branches:
    name = "Other_Branches"
    domain = "Fraction"
    maxval = 1.0
    errcount = 0
    area = "Info.Branches"
    metricgroup = frozenset(['Bad', 'Branches'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Other_Branches(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Other_Branches zero division")
    desc = """
        Fraction of branches of other types (not individually
        covered by other metrics in Info.Branches group)"""
# Info.Memory: average latency (core cycles) of L1D-miss demand loads.
class Metric_Load_Miss_Real_Latency:
    name = "Load_Miss_Real_Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_Miss_Real_Latency zero division")
    desc = """
        Actual Average Latency for L1 data-cache miss demand load
        operations (in core cycles)"""
# Info.Memory: memory-level parallelism of L1 demand-load misses.
class Metric_MLP:
    name = "MLP"
    domain = "Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = MLP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MLP zero division")
    desc = """
        Memory-Level-Parallelism (average number of L1 miss demand
        load when there is at least one such miss. Per-Logical
        Processor)"""
# Info.Memory: retired demand-load L1 misses per kilo instruction.
class Metric_L1MPKI:
    name = "L1MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1MPKI zero division")
    desc = """
        L1 cache true misses per kilo instruction for retired demand
        loads"""
# Info.Memory: all demand-load (incl. speculative) L1 misses per kilo instruction.
class Metric_L1MPKI_Load:
    name = "L1MPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1MPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1MPKI_Load zero division")
    desc = """
        L1 cache true misses per kilo instruction for all demand
        loads (including speculative)"""
# Info.Memory: retired demand-load L2 misses per kilo instruction.
class Metric_L2MPKI:
    name = "L2MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'Backend', 'CacheHits'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI zero division")
    desc = """
        L2 cache true misses per kilo instruction for retired demand
        loads"""
# Info.Memory: all-request (incl. speculative) L2 misses per kilo instruction.
class Metric_L2MPKI_All:
    name = "L2MPKI_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_All zero division")
    desc = """
        L2 cache true misses per kilo instruction for all request
        types (including speculative)"""
# Info.Memory: all demand-load (incl. speculative) L2 misses per kilo instruction.
class Metric_L2MPKI_Load:
    name = "L2MPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_Load zero division")
    desc = """
        L2 cache true misses per kilo instruction for all demand
        loads (including speculative)"""
# Info.Memory: demand-RFO L2-miss offcore requests per kilo instruction.
class Metric_L2MPKI_RFO:
    name = "L2MPKI_RFO"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheMisses', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_RFO(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_RFO zero division")
    desc = """
        Offcore requests (L2 cache miss) per kilo instruction for
        demand RFOs"""
# Info.Memory: all-request L2 hits per kilo instruction.
class Metric_L2HPKI_All:
    name = "L2HPKI_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2HPKI_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2HPKI_All zero division")
    desc = """
        L2 cache hits per kilo instruction for all request types
        (including speculative)"""
# Info.Memory: demand-load L2 hits per kilo instruction.
class Metric_L2HPKI_Load:
    name = "L2HPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2HPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2HPKI_Load zero division")
    desc = """
        L2 cache hits per kilo instruction for all demand loads
        (including speculative)"""
# Info.Memory: retired demand-load L3 misses per kilo instruction.
class Metric_L3MPKI:
    name = "L3MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3MPKI zero division")
    desc = """
        L3 cache true misses per kilo instruction for retired demand
        loads"""
# Info.Memory: fill-buffer hits per kilo instruction for retired demand loads.
class Metric_FB_HPKI:
    name = "FB_HPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FB_HPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FB_HPKI zero division")
    desc = """
        Fill Buffer (FB) hits per kilo instructions for retired
        demand loads (L1D misses that merge into ongoing miss-
        handling entries)"""
# Info.Memory: L1D fill bandwidth (desc intentionally empty in generator output).
class Metric_L1D_Cache_Fill_BW:
    name = "L1D_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1D_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1D_Cache_Fill_BW zero division")
    desc = """
        """
# Info.Memory: L2 fill bandwidth (desc intentionally empty in generator output).
class Metric_L2_Cache_Fill_BW:
    name = "L2_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2_Cache_Fill_BW zero division")
    desc = """
        """
# Info.Memory: L3 fill bandwidth (desc intentionally empty in generator output).
class Metric_L3_Cache_Fill_BW:
    name = "L3_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Fill_BW zero division")
    desc = """
        """
# Info.Memory: L3 access bandwidth (desc intentionally empty in generator output).
class Metric_L3_Cache_Access_BW:
    name = "L3_Cache_Access_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3_Cache_Access_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Access_BW zero division")
    desc = """
        """
# Info.Memory.TLB: utilization of the core's page walkers serving STLB misses.
class Metric_Page_Walks_Utilization:
    name = "Page_Walks_Utilization"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Memory.TLB"
    metricgroup = frozenset(['Mem', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Page_Walks_Utilization(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Page_Walks_Utilization zero division")
    desc = """
        Utilization of the core's Page Walker(s) serving STLB misses
        triggered by instruction/Load/Store accesses"""
# Info.Memory.TLB: speculative code STLB misses per kilo instruction.
class Metric_Code_STLB_MPKI:
    name = "Code_STLB_MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.TLB"
    metricgroup = frozenset(['Fed', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Code_STLB_MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Code_STLB_MPKI zero division")
    desc = """
        STLB (2nd level TLB) code speculative misses per kilo
        instruction (misses of any page-size that complete the page
        walk)"""
# Info.Memory.TLB: speculative data-load STLB misses per kilo instruction.
class Metric_Load_STLB_MPKI:
    name = "Load_STLB_MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.TLB"
    metricgroup = frozenset(['Mem', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Load_STLB_MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_STLB_MPKI zero division")
    desc = """
        STLB (2nd level TLB) data load speculative misses per kilo
        instruction (misses of any page-size that complete the page
        walk)"""
# Info.Memory.TLB: speculative data-store STLB misses per kilo instruction.
class Metric_Store_STLB_MPKI:
    name = "Store_STLB_MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.TLB"
    metricgroup = frozenset(['Mem', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Store_STLB_MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Store_STLB_MPKI zero division")
    desc = """
        STLB (2nd level TLB) data store speculative misses per kilo
        instruction (misses of any page-size that complete the page
        walk)"""
# Info.Memory.Core: per-core L1D fill bandwidth [GB/sec].
class Metric_L1D_Cache_Fill_BW_2T:
    name = "L1D_Cache_Fill_BW_2T"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Core"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1D_Cache_Fill_BW_2T(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division")
    desc = """
        Average per-core data fill bandwidth to the L1 data cache
        [GB / sec]"""
# Info.Memory.Core: per-core L2 fill bandwidth [GB/sec].
class Metric_L2_Cache_Fill_BW_2T:
    name = "L2_Cache_Fill_BW_2T"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Core"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2_Cache_Fill_BW_2T(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division")
    desc = """
        Average per-core data fill bandwidth to the L2 cache [GB /
        sec]"""
# Info.Memory.Core: per-core L3 fill bandwidth [GB/sec].
class Metric_L3_Cache_Fill_BW_2T:
    name = "L3_Cache_Fill_BW_2T"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Core"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3_Cache_Fill_BW_2T(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division")
    desc = """
        Average per-core data fill bandwidth to the L3 cache [GB /
        sec]"""
# Info.Memory.Core: per-core L3 access bandwidth [GB/sec].
class Metric_L3_Cache_Access_BW_2T:
    name = "L3_Cache_Access_BW_2T"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Core"
    metricgroup = frozenset(['Mem', 'MemoryBW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3_Cache_Access_BW_2T(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Access_BW_2T zero division")
    desc = """
        Average per-core data access bandwidth to the L3 cache [GB /
        sec]"""
# Info.Memory.Latency: average latency of L2-miss demand loads.
class Metric_Load_L2_Miss_Latency:
    name = "Load_L2_Miss_Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    area = "Info.Memory.Latency"
    metricgroup = frozenset(['Memory_Lat', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Load_L2_Miss_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_L2_Miss_Latency zero division")
    desc = """
        Average Latency for L2 cache miss demand Loads"""
# Info.Memory.Latency: average latency of L3-miss demand loads.
class Metric_Load_L3_Miss_Latency:
    name = "Load_L3_Miss_Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    area = "Info.Memory.Latency"
    metricgroup = frozenset(['Memory_Lat', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Load_L3_Miss_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_L3_Miss_Latency zero division")
    desc = """
        Average Latency for L3 cache miss demand Loads"""
# Info.Memory.Latency: average parallel L2-miss demand loads in flight.
class Metric_Load_L2_MLP:
    name = "Load_L2_MLP"
    domain = "Metric"
    maxval = 100
    errcount = 0
    area = "Info.Memory.Latency"
    metricgroup = frozenset(['Memory_BW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Load_L2_MLP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_L2_MLP zero division")
    desc = """
        Average Parallel L2 cache miss demand Loads"""
# Info.Memory.Latency: average parallel L2-miss data reads in flight.
class Metric_Data_L2_MLP:
    name = "Data_L2_MLP"
    domain = "Metric"
    maxval = 100
    errcount = 0
    area = "Info.Memory.Latency"
    metricgroup = frozenset(['Memory_BW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Data_L2_MLP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Data_L2_MLP zero division")
    desc = """
        Average Parallel L2 cache miss data reads"""
# Info.Memory.Mix: un-cacheable retired loads per kilo instruction.
class Metric_UC_Load_PKI:
    name = "UC_Load_PKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Mix"
    metricgroup = frozenset(['Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UC_Load_PKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "UC_Load_PKI zero division")
    desc = """
        Un-cacheable retired load per kilo instruction"""
# Info.Memory.Mix: bus locks per kilo instruction.
class Metric_Bus_Lock_PKI:
    name = "Bus_Lock_PKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Mix"
    metricgroup = frozenset(['Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Bus_Lock_PKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Bus_Lock_PKI zero division")
    desc = """
        \"Bus lock\" per kilo instruction"""
# Info.System: average CPU utilization (fraction of time unhalted).
class Metric_CPU_Utilization:
    name = "CPU_Utilization"
    domain = "Metric"
    maxval = 1
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['HPC', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPU_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPU_Utilization zero division")
    desc = """
        Average CPU Utilization (percentage)"""
# Info.System: average number of utilized CPUs.
class Metric_CPUs_Utilized:
    name = "CPUs_Utilized"
    domain = "Metric"
    maxval = 300
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPUs_Utilized(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPUs_Utilized zero division")
    desc = """
        Average number of utilized CPUs"""
# Info.System: measured average core frequency while unhalted [GHz].
class Metric_Core_Frequency:
    name = "Core_Frequency"
    domain = "SystemMetric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary', 'Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Core_Frequency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Frequency zero division")
    desc = """
        Measured Average Core Frequency for unhalted processors
        [GHz]"""
# Info.System: giga floating-point operations per second (all FP kinds).
class Metric_GFLOPs:
    name = "GFLOPs"
    domain = "Metric"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = GFLOPs(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "GFLOPs zero division")
    desc = """
        Giga Floating Point Operations Per Second. Aggregate across
        all supported options of: FP precisions, scalar and vector
        instructions, vector-width"""
# Info.System: frequency utilization relative to nominal frequency.
class Metric_Turbo_Utilization:
    name = "Turbo_Utilization"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Turbo_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Turbo_Utilization zero division")
    desc = """
        Average Frequency Utilization relative nominal frequency"""
# Info.System: fraction of cycles with both SMT siblings active.
class Metric_SMT_2T_Utilization:
    name = "SMT_2T_Utilization"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SMT_2T_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SMT_2T_Utilization zero division")
    desc = """
        Fraction of cycles where both hardware Logical Processors
        were active"""
# Info.System: fraction of cycles spent in OS kernel mode.
class Metric_Kernel_Utilization:
    name = "Kernel_Utilization"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['OS'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Kernel_Utilization(self, EV, 0)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_Utilization zero division")
    desc = """
        Fraction of cycles spent in the Operating System (OS) Kernel
        mode"""
# Info.System: cycles per instruction while in kernel mode.
class Metric_Kernel_CPI:
    name = "Kernel_CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['OS'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Kernel_CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_CPI zero division")
    desc = """
        Cycles Per Instruction for the Operating System (OS) Kernel
        mode"""
# Info.System: fraction of cycles waiting yet unhalted (PAUSE, C0.1/C0.2).
class Metric_C0_Wait:
    name = "C0_Wait"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['C0Wait'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = C0_Wait(self, EV, 0)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error_metric(self, "C0_Wait zero division")
    desc = """
        Fraction of cycles the processor is waiting yet unhalted;
        covering legacy PAUSE instruction, as well as C0.1 / C0.2
        power-performance optimized states. Sample code of TPAUSE: h
        ttps://github.com/torvalds/linux/blob/master/arch/x86/lib/de
        lay.c"""
# Info.System: external DRAM bandwidth use for reads and writes [GB/sec].
class Metric_DRAM_BW_Use:
    name = "DRAM_BW_Use"
    domain = "GB/sec"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DRAM_BW_Use(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "DRAM_BW_Use zero division")
    desc = """
        Average external Memory Bandwidth Use for reads and writes
        [GB / sec]"""
# Info.System: average parallel data-read requests to external memory.
class Metric_MEM_Parallel_Reads:
    name = "MEM_Parallel_Reads"
    domain = "SystemMetric"
    maxval = 100
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Mem', 'MemoryBW', 'SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = MEM_Parallel_Reads(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_Parallel_Reads zero division")
    desc = """
        Average number of parallel data read requests to external
        memory. Accounts for demand loads and L1/L2 prefetches"""
# Info.System: total package power in Watts.
class Metric_Power:
    name = "Power"
    domain = "SystemMetric"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Power', 'SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Power(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Power zero division")
    desc = """
        Total package Power in Watts"""
# Info.System: run duration in seconds (flagged when under 1s).
class Metric_Time:
    name = "Time"
    domain = "Seconds"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Time(self, EV, 0)
            self.thresh = (self.val < 1)
        except ZeroDivisionError:
            handle_error_metric(self, "Time zero division")
    desc = """
        Run duration time in seconds"""
# Info.System: socket clocks while any core on the socket is active.
class Metric_Socket_CLKS:
    name = "Socket_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Socket_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Socket_CLKS zero division")
    desc = """
        Socket actual clocks when any core is active on that socket"""
# Info.System: instructions per far branch (OS transitions, interrupts).
class Metric_IpFarBranch:
    name = "IpFarBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Branches', 'OS'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpFarBranch(self, EV, 0)
            self.thresh = (self.val < 1000000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFarBranch zero division")
    desc = """
        Instructions per Far Branch ( Far Branches apply upon
        transition from application to operating system, handling
        interrupts, exceptions) [lower number means higher
        occurrence rate]"""
# Schedule
class Setup:
def __init__(self, r):
o = dict()
n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n
n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n
n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n
n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n
n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n
n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n
n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n
n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n
n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n
n = LCP() ; r.run(n) ; o["LCP"] = n
n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n
n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n
n = MITE() ; r.run(n) ; o["MITE"] = n
n = Decoder0_Alone() ; r.run(n) ; o["Decoder0_Alone"] = n
n = DSB() ; r.run(n) ; o["DSB"] = n
n = LSD() ; r.run(n) ; o["LSD"] = n
n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n
n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n
n = Other_Mispredicts() ; r.run(n) ; o["Other_Mispredicts"] = n
n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n
n = Other_Nukes() ; r.run(n) ; o["Other_Nukes"] = n
n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n
n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n
n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n
n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n
n = Load_STLB_Hit() ; r.run(n) ; o["Load_STLB_Hit"] = n
n = Load_STLB_Miss() ; r.run(n) ; o["Load_STLB_Miss"] = n
n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n
n = L1_Hit_Latency() ; r.run(n) ; o["L1_Hit_Latency"] = n
n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n
n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n
n = FB_Full() ; r.run(n) ; o["FB_Full"] = n
n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n
n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n
n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n
n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n
n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n
n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n
n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n
n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n
n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n
n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n
n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n
n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n
n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n
n = Streaming_Stores() ; r.run(n) ; o["Streaming_Stores"] = n
n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n
n = Store_STLB_Hit() ; r.run(n) ; o["Store_STLB_Hit"] = n
n = Store_STLB_Miss() ; r.run(n) ; o["Store_STLB_Miss"] = n
n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n
n = Divider() ; r.run(n) ; o["Divider"] = n
n = Serializing_Operation() ; r.run(n) ; o["Serializing_Operation"] = n
n = Slow_Pause() ; r.run(n) ; o["Slow_Pause"] = n
n = C01_WAIT() ; r.run(n) ; o["C01_WAIT"] = n
n = C02_WAIT() ; r.run(n) ; o["C02_WAIT"] = n
n = Memory_Fence() ; r.run(n) ; o["Memory_Fence"] = n
n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n
n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n
n = Mixing_Vectors() ; r.run(n) ; o["Mixing_Vectors"] = n
n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n
n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n
n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n
n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n
n = Port_0() ; r.run(n) ; o["Port_0"] = n
n = Port_1() ; r.run(n) ; o["Port_1"] = n
n = Port_6() ; r.run(n) ; o["Port_6"] = n
n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n
n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n
n = Retiring() ; r.run(n) ; o["Retiring"] = n
n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n
n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n
n = X87_Use() ; r.run(n) ; o["X87_Use"] = n
n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n
n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n
n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n
n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n
n = Int_Operations() ; r.run(n) ; o["Int_Operations"] = n
n = Int_Vector_128b() ; r.run(n) ; o["Int_Vector_128b"] = n
n = Int_Vector_256b() ; r.run(n) ; o["Int_Vector_256b"] = n
n = Memory_Operations() ; r.run(n) ; o["Memory_Operations"] = n
n = Fused_Instructions() ; r.run(n) ; o["Fused_Instructions"] = n
n = Non_Fused_Branches() ; r.run(n) ; o["Non_Fused_Branches"] = n
n = Other_Light_Ops() ; r.run(n) ; o["Other_Light_Ops"] = n
n = Nop_Instructions() ; r.run(n) ; o["Nop_Instructions"] = n
n = Shuffles_256b() ; r.run(n) ; o["Shuffles_256b"] = n
n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n
n = Few_Uops_Instructions() ; r.run(n) ; o["Few_Uops_Instructions"] = n
n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n
n = Assists() ; r.run(n) ; o["Assists"] = n
n = Page_Faults() ; r.run(n) ; o["Page_Faults"] = n
n = FP_Assists() ; r.run(n) ; o["FP_Assists"] = n
n = AVX_Assists() ; r.run(n) ; o["AVX_Assists"] = n
n = CISC() ; r.run(n) ; o["CISC"] = n
# parents
o["Fetch_Latency"].parent = o["Frontend_Bound"]
o["ICache_Misses"].parent = o["Fetch_Latency"]
o["ITLB_Misses"].parent = o["Fetch_Latency"]
o["Branch_Resteers"].parent = o["Fetch_Latency"]
o["Mispredicts_Resteers"].parent = o["Branch_Resteers"]
o["Clears_Resteers"].parent = o["Branch_Resteers"]
o["Unknown_Branches"].parent = o["Branch_Resteers"]
o["MS_Switches"].parent = o["Fetch_Latency"]
o["LCP"].parent = o["Fetch_Latency"]
o["DSB_Switches"].parent = o["Fetch_Latency"]
o["Fetch_Bandwidth"].parent = o["Frontend_Bound"]
o["MITE"].parent = o["Fetch_Bandwidth"]
o["Decoder0_Alone"].parent = o["MITE"]
o["DSB"].parent = o["Fetch_Bandwidth"]
o["LSD"].parent = o["Fetch_Bandwidth"]
o["Branch_Mispredicts"].parent = o["Bad_Speculation"]
o["Other_Mispredicts"].parent = o["Branch_Mispredicts"]
o["Machine_Clears"].parent = o["Bad_Speculation"]
o["Other_Nukes"].parent = o["Machine_Clears"]
o["Memory_Bound"].parent = o["Backend_Bound"]
o["L1_Bound"].parent = o["Memory_Bound"]
o["DTLB_Load"].parent = o["L1_Bound"]
o["Load_STLB_Hit"].parent = o["DTLB_Load"]
o["Load_STLB_Miss"].parent = o["DTLB_Load"]
o["Store_Fwd_Blk"].parent = o["L1_Bound"]
o["L1_Hit_Latency"].parent = o["L1_Bound"]
o["Lock_Latency"].parent = o["L1_Bound"]
o["Split_Loads"].parent = o["L1_Bound"]
o["FB_Full"].parent = o["L1_Bound"]
o["L2_Bound"].parent = o["Memory_Bound"]
o["L3_Bound"].parent = o["Memory_Bound"]
o["Contested_Accesses"].parent = o["L3_Bound"]
o["Data_Sharing"].parent = o["L3_Bound"]
o["L3_Hit_Latency"].parent = o["L3_Bound"]
o["SQ_Full"].parent = o["L3_Bound"]
o["DRAM_Bound"].parent = o["Memory_Bound"]
o["MEM_Bandwidth"].parent = o["DRAM_Bound"]
o["MEM_Latency"].parent = o["DRAM_Bound"]
o["Store_Bound"].parent = o["Memory_Bound"]
o["Store_Latency"].parent = o["Store_Bound"]
o["False_Sharing"].parent = o["Store_Bound"]
o["Split_Stores"].parent = o["Store_Bound"]
o["Streaming_Stores"].parent = o["Store_Bound"]
o["DTLB_Store"].parent = o["Store_Bound"]
o["Store_STLB_Hit"].parent = o["DTLB_Store"]
o["Store_STLB_Miss"].parent = o["DTLB_Store"]
o["Core_Bound"].parent = o["Backend_Bound"]
o["Divider"].parent = o["Core_Bound"]
o["Serializing_Operation"].parent = o["Core_Bound"]
o["Slow_Pause"].parent = o["Serializing_Operation"]
o["C01_WAIT"].parent = o["Serializing_Operation"]
o["C02_WAIT"].parent = o["Serializing_Operation"]
o["Memory_Fence"].parent = o["Serializing_Operation"]
o["Ports_Utilization"].parent = o["Core_Bound"]
o["Ports_Utilized_0"].parent = o["Ports_Utilization"]
o["Mixing_Vectors"].parent = o["Ports_Utilized_0"]
o["Ports_Utilized_1"].parent = o["Ports_Utilization"]
o["Ports_Utilized_2"].parent = o["Ports_Utilization"]
o["Ports_Utilized_3m"].parent = o["Ports_Utilization"]
o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Port_0"].parent = o["ALU_Op_Utilization"]
o["Port_1"].parent = o["ALU_Op_Utilization"]
o["Port_6"].parent = o["ALU_Op_Utilization"]
o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Light_Operations"].parent = o["Retiring"]
o["FP_Arith"].parent = o["Light_Operations"]
o["X87_Use"].parent = o["FP_Arith"]
o["FP_Scalar"].parent = o["FP_Arith"]
o["FP_Vector"].parent = o["FP_Arith"]
o["FP_Vector_128b"].parent = o["FP_Vector"]
o["FP_Vector_256b"].parent = o["FP_Vector"]
o["Int_Operations"].parent = o["Light_Operations"]
o["Int_Vector_128b"].parent = o["Int_Operations"]
o["Int_Vector_256b"].parent = o["Int_Operations"]
o["Memory_Operations"].parent = o["Light_Operations"]
o["Fused_Instructions"].parent = o["Light_Operations"]
o["Non_Fused_Branches"].parent = o["Light_Operations"]
o["Other_Light_Ops"].parent = o["Light_Operations"]
o["Nop_Instructions"].parent = o["Other_Light_Ops"]
o["Shuffles_256b"].parent = o["Other_Light_Ops"]
o["Heavy_Operations"].parent = o["Retiring"]
o["Few_Uops_Instructions"].parent = o["Heavy_Operations"]
o["Microcode_Sequencer"].parent = o["Heavy_Operations"]
o["Assists"].parent = o["Microcode_Sequencer"]
o["Page_Faults"].parent = o["Assists"]
o["FP_Assists"].parent = o["Assists"]
o["AVX_Assists"].parent = o["Assists"]
o["CISC"].parent = o["Microcode_Sequencer"]
# user visible metrics
n = Metric_Mispredictions() ; r.metric(n) ; o["Mispredictions"] = n
n = Metric_Big_Code() ; r.metric(n) ; o["Big_Code"] = n
n = Metric_Instruction_Fetch_BW() ; r.metric(n) ; o["Instruction_Fetch_BW"] = n
n = Metric_Cache_Memory_Bandwidth() ; r.metric(n) ; o["Cache_Memory_Bandwidth"] = n
n = Metric_Cache_Memory_Latency() ; r.metric(n) ; o["Cache_Memory_Latency"] = n
n = Metric_Memory_Data_TLBs() ; r.metric(n) ; o["Memory_Data_TLBs"] = n
n = Metric_Memory_Synchronization() ; r.metric(n) ; o["Memory_Synchronization"] = n
n = Metric_Compute_Bound_Est() ; r.metric(n) ; o["Compute_Bound_Est"] = n
n = Metric_Irregular_Overhead() ; r.metric(n) ; o["Irregular_Overhead"] = n
n = Metric_Other_Bottlenecks() ; r.metric(n) ; o["Other_Bottlenecks"] = n
n = Metric_Branching_Overhead() ; r.metric(n) ; o["Branching_Overhead"] = n
n = Metric_Useful_Work() ; r.metric(n) ; o["Useful_Work"] = n
n = Metric_Core_Bound_Likely() ; r.metric(n) ; o["Core_Bound_Likely"] = n
n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n
n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n
n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n
n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n
n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n
n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n
n = Metric_Slots_Utilization() ; r.metric(n) ; o["Slots_Utilization"] = n
n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n
n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n
n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n
n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n
n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n
n = Metric_EPC() ; r.metric(n) ; o["EPC"] = n
n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n
n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n
n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n
n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n
n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n
n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n
n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n
n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n
n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n
n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n
n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n
n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n
n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n
n = Metric_IpPause() ; r.metric(n) ; o["IpPause"] = n
n = Metric_IpSWPF() ; r.metric(n) ; o["IpSWPF"] = n
n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n
n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n
n = Metric_Strings_Cycles() ; r.metric(n) ; o["Strings_Cycles"] = n
n = Metric_IpAssist() ; r.metric(n) ; o["IpAssist"] = n
n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n
n = Metric_Fetch_LSD() ; r.metric(n) ; o["Fetch_LSD"] = n
n = Metric_Fetch_DSB() ; r.metric(n) ; o["Fetch_DSB"] = n
n = Metric_Fetch_MITE() ; r.metric(n) ; o["Fetch_MITE"] = n
n = Metric_Fetch_UpC() ; r.metric(n) ; o["Fetch_UpC"] = n
n = Metric_LSD_Coverage() ; r.metric(n) ; o["LSD_Coverage"] = n
n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n
n = Metric_Unknown_Branch_Cost() ; r.metric(n) ; o["Unknown_Branch_Cost"] = n
n = Metric_DSB_Switch_Cost() ; r.metric(n) ; o["DSB_Switch_Cost"] = n
n = Metric_DSB_Misses() ; r.metric(n) ; o["DSB_Misses"] = n
n = Metric_DSB_Bandwidth() ; r.metric(n) ; o["DSB_Bandwidth"] = n
n = Metric_ICache_Miss_Latency() ; r.metric(n) ; o["ICache_Miss_Latency"] = n
n = Metric_IC_Misses() ; r.metric(n) ; o["IC_Misses"] = n
n = Metric_IpDSB_Miss_Ret() ; r.metric(n) ; o["IpDSB_Miss_Ret"] = n
n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n
n = Metric_L2MPKI_Code() ; r.metric(n) ; o["L2MPKI_Code"] = n
n = Metric_L2MPKI_Code_All() ; r.metric(n) ; o["L2MPKI_Code_All"] = n
n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n
n = Metric_IpMisp_Cond_Ntaken() ; r.metric(n) ; o["IpMisp_Cond_Ntaken"] = n
n = Metric_IpMisp_Cond_Taken() ; r.metric(n) ; o["IpMisp_Cond_Taken"] = n
n = Metric_IpMisp_Ret() ; r.metric(n) ; o["IpMisp_Ret"] = n
n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n
n = Metric_Branch_Misprediction_Cost() ; r.metric(n) ; o["Branch_Misprediction_Cost"] = n
n = Metric_Spec_Clears_Ratio() ; r.metric(n) ; o["Spec_Clears_Ratio"] = n
n = Metric_Cond_NT() ; r.metric(n) ; o["Cond_NT"] = n
n = Metric_Cond_TK() ; r.metric(n) ; o["Cond_TK"] = n
n = Metric_CallRet() ; r.metric(n) ; o["CallRet"] = n
n = Metric_Jump() ; r.metric(n) ; o["Jump"] = n
n = Metric_Other_Branches() ; r.metric(n) ; o["Other_Branches"] = n
n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n
n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n
n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n
n = Metric_L1MPKI_Load() ; r.metric(n) ; o["L1MPKI_Load"] = n
n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n
n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n
n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n
n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n
n = Metric_L2HPKI_All() ; r.metric(n) ; o["L2HPKI_All"] = n
n = Metric_L2HPKI_Load() ; r.metric(n) ; o["L2HPKI_Load"] = n
n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n
n = Metric_FB_HPKI() ; r.metric(n) ; o["FB_HPKI"] = n
n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n
n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n
n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n
n = Metric_L3_Cache_Access_BW() ; r.metric(n) ; o["L3_Cache_Access_BW"] = n
n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n
n = Metric_Code_STLB_MPKI() ; r.metric(n) ; o["Code_STLB_MPKI"] = n
n = Metric_Load_STLB_MPKI() ; r.metric(n) ; o["Load_STLB_MPKI"] = n
n = Metric_Store_STLB_MPKI() ; r.metric(n) ; o["Store_STLB_MPKI"] = n
n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n
n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n
n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n
n = Metric_L3_Cache_Access_BW_2T() ; r.metric(n) ; o["L3_Cache_Access_BW_2T"] = n
n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n
n = Metric_Load_L3_Miss_Latency() ; r.metric(n) ; o["Load_L3_Miss_Latency"] = n
n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n
n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n
n = Metric_UC_Load_PKI() ; r.metric(n) ; o["UC_Load_PKI"] = n
n = Metric_Bus_Lock_PKI() ; r.metric(n) ; o["Bus_Lock_PKI"] = n
n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n
n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n
n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n
n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n
n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n
n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n
n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n
n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n
n = Metric_C0_Wait() ; r.metric(n) ; o["C0_Wait"] = n
n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n
n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n
n = Metric_Power() ; r.metric(n) ; o["Power"] = n
n = Metric_Time() ; r.metric(n) ; o["Time"] = n
n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n
n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n
# references between groups
o["Branch_Resteers"].Unknown_Branches = o["Unknown_Branches"]
o["Mispredicts_Resteers"].Retiring = o["Retiring"]
o["Mispredicts_Resteers"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Mispredicts_Resteers"].Bad_Speculation = o["Bad_Speculation"]
o["Mispredicts_Resteers"].Frontend_Bound = o["Frontend_Bound"]
o["Mispredicts_Resteers"].Backend_Bound = o["Backend_Bound"]
o["Clears_Resteers"].Retiring = o["Retiring"]
o["Clears_Resteers"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Clears_Resteers"].Bad_Speculation = o["Bad_Speculation"]
o["Clears_Resteers"].Frontend_Bound = o["Frontend_Bound"]
o["Clears_Resteers"].Backend_Bound = o["Backend_Bound"]
o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"]
o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"]
o["Bad_Speculation"].Retiring = o["Retiring"]
o["Bad_Speculation"].Frontend_Bound = o["Frontend_Bound"]
o["Bad_Speculation"].Backend_Bound = o["Backend_Bound"]
o["Other_Mispredicts"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Machine_Clears"].Retiring = o["Retiring"]
o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"]
o["Machine_Clears"].Frontend_Bound = o["Frontend_Bound"]
o["Machine_Clears"].Backend_Bound = o["Backend_Bound"]
o["Other_Nukes"].Machine_Clears = o["Machine_Clears"]
o["Other_Nukes"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Other_Nukes"].Retiring = o["Retiring"]
o["Other_Nukes"].Backend_Bound = o["Backend_Bound"]
o["Other_Nukes"].Bad_Speculation = o["Bad_Speculation"]
o["Other_Nukes"].Frontend_Bound = o["Frontend_Bound"]
o["DTLB_Load"].Load_STLB_Miss = o["Load_STLB_Miss"]
o["Load_STLB_Hit"].Load_STLB_Miss = o["Load_STLB_Miss"]
o["Load_STLB_Hit"].DTLB_Load = o["DTLB_Load"]
o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["DTLB_Store"].Store_STLB_Miss = o["Store_STLB_Miss"]
o["Store_STLB_Hit"].DTLB_Store = o["DTLB_Store"]
o["Store_STLB_Hit"].Store_STLB_Miss = o["Store_STLB_Miss"]
o["Core_Bound"].Memory_Bound = o["Memory_Bound"]
o["Core_Bound"].Backend_Bound = o["Backend_Bound"]
o["Serializing_Operation"].C02_WAIT = o["C02_WAIT"]
o["Ports_Utilization"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Ports_Utilization"].Retiring = o["Retiring"]
o["Retiring"].Heavy_Operations = o["Heavy_Operations"]
o["Light_Operations"].Retiring = o["Retiring"]
o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"]
o["FP_Arith"].Retiring = o["Retiring"]
o["FP_Arith"].FP_Scalar = o["FP_Scalar"]
o["FP_Arith"].X87_Use = o["X87_Use"]
o["FP_Arith"].FP_Vector = o["FP_Vector"]
o["X87_Use"].Retiring = o["Retiring"]
o["FP_Scalar"].Retiring = o["Retiring"]
o["FP_Vector"].Retiring = o["Retiring"]
o["FP_Vector_128b"].Retiring = o["Retiring"]
o["FP_Vector_256b"].Retiring = o["Retiring"]
o["Int_Operations"].Retiring = o["Retiring"]
o["Int_Operations"].Int_Vector_256b = o["Int_Vector_256b"]
o["Int_Operations"].Int_Vector_128b = o["Int_Vector_128b"]
o["Int_Vector_128b"].Retiring = o["Retiring"]
o["Int_Vector_256b"].Retiring = o["Retiring"]
o["Memory_Operations"].Retiring = o["Retiring"]
o["Memory_Operations"].Light_Operations = o["Light_Operations"]
o["Memory_Operations"].Heavy_Operations = o["Heavy_Operations"]
o["Fused_Instructions"].Retiring = o["Retiring"]
o["Fused_Instructions"].Light_Operations = o["Light_Operations"]
o["Fused_Instructions"].Heavy_Operations = o["Heavy_Operations"]
o["Non_Fused_Branches"].Retiring = o["Retiring"]
o["Non_Fused_Branches"].Light_Operations = o["Light_Operations"]
o["Non_Fused_Branches"].Heavy_Operations = o["Heavy_Operations"]
o["Other_Light_Ops"].Light_Operations = o["Light_Operations"]
o["Other_Light_Ops"].Retiring = o["Retiring"]
o["Other_Light_Ops"].Heavy_Operations = o["Heavy_Operations"]
o["Other_Light_Ops"].Int_Operations = o["Int_Operations"]
o["Other_Light_Ops"].Non_Fused_Branches = o["Non_Fused_Branches"]
o["Other_Light_Ops"].FP_Arith = o["FP_Arith"]
o["Other_Light_Ops"].Fused_Instructions = o["Fused_Instructions"]
o["Other_Light_Ops"].Int_Vector_128b = o["Int_Vector_128b"]
o["Other_Light_Ops"].FP_Vector = o["FP_Vector"]
o["Other_Light_Ops"].FP_Scalar = o["FP_Scalar"]
o["Other_Light_Ops"].X87_Use = o["X87_Use"]
o["Other_Light_Ops"].Int_Vector_256b = o["Int_Vector_256b"]
o["Other_Light_Ops"].Memory_Operations = o["Memory_Operations"]
o["Nop_Instructions"].Retiring = o["Retiring"]
o["Nop_Instructions"].Light_Operations = o["Light_Operations"]
o["Nop_Instructions"].Heavy_Operations = o["Heavy_Operations"]
o["Shuffles_256b"].Retiring = o["Retiring"]
o["Shuffles_256b"].Light_Operations = o["Light_Operations"]
o["Shuffles_256b"].Heavy_Operations = o["Heavy_Operations"]
o["Few_Uops_Instructions"].Heavy_Operations = o["Heavy_Operations"]
o["Few_Uops_Instructions"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["CISC"].Assists = o["Assists"]
o["Mispredictions"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Mispredictions"].LCP = o["LCP"]
o["Mispredictions"].Retiring = o["Retiring"]
o["Mispredictions"].Other_Mispredicts = o["Other_Mispredicts"]
o["Mispredictions"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Mispredictions"].Frontend_Bound = o["Frontend_Bound"]
o["Mispredictions"].DSB_Switches = o["DSB_Switches"]
o["Mispredictions"].Backend_Bound = o["Backend_Bound"]
o["Mispredictions"].Branch_Resteers = o["Branch_Resteers"]
o["Mispredictions"].ICache_Misses = o["ICache_Misses"]
o["Mispredictions"].MS_Switches = o["MS_Switches"]
o["Mispredictions"].Bad_Speculation = o["Bad_Speculation"]
o["Mispredictions"].ITLB_Misses = o["ITLB_Misses"]
o["Mispredictions"].Unknown_Branches = o["Unknown_Branches"]
o["Mispredictions"].Fetch_Latency = o["Fetch_Latency"]
o["Mispredictions"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Big_Code"].LCP = o["LCP"]
o["Big_Code"].ICache_Misses = o["ICache_Misses"]
o["Big_Code"].DSB_Switches = o["DSB_Switches"]
o["Big_Code"].Branch_Resteers = o["Branch_Resteers"]
o["Big_Code"].MS_Switches = o["MS_Switches"]
o["Big_Code"].ITLB_Misses = o["ITLB_Misses"]
o["Big_Code"].Unknown_Branches = o["Unknown_Branches"]
o["Big_Code"].Fetch_Latency = o["Fetch_Latency"]
o["Instruction_Fetch_BW"].Retiring = o["Retiring"]
o["Instruction_Fetch_BW"].Other_Mispredicts = o["Other_Mispredicts"]
o["Instruction_Fetch_BW"].DSB_Switches = o["DSB_Switches"]
o["Instruction_Fetch_BW"].Backend_Bound = o["Backend_Bound"]
o["Instruction_Fetch_BW"].Branch_Resteers = o["Branch_Resteers"]
o["Instruction_Fetch_BW"].Fetch_Latency = o["Fetch_Latency"]
o["Instruction_Fetch_BW"].ICache_Misses = o["ICache_Misses"]
o["Instruction_Fetch_BW"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Instruction_Fetch_BW"].Frontend_Bound = o["Frontend_Bound"]
o["Instruction_Fetch_BW"].Bad_Speculation = o["Bad_Speculation"]
o["Instruction_Fetch_BW"].ITLB_Misses = o["ITLB_Misses"]
o["Instruction_Fetch_BW"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Instruction_Fetch_BW"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Instruction_Fetch_BW"].LCP = o["LCP"]
o["Instruction_Fetch_BW"].Clears_Resteers = o["Clears_Resteers"]
o["Instruction_Fetch_BW"].MS_Switches = o["MS_Switches"]
o["Instruction_Fetch_BW"].Unknown_Branches = o["Unknown_Branches"]
o["Cache_Memory_Bandwidth"].L1_Bound = o["L1_Bound"]
o["Cache_Memory_Bandwidth"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Cache_Memory_Bandwidth"].SQ_Full = o["SQ_Full"]
o["Cache_Memory_Bandwidth"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Cache_Memory_Bandwidth"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Cache_Memory_Bandwidth"].Load_STLB_Miss = o["Load_STLB_Miss"]
o["Cache_Memory_Bandwidth"].Data_Sharing = o["Data_Sharing"]
o["Cache_Memory_Bandwidth"].L2_Bound = o["L2_Bound"]
o["Cache_Memory_Bandwidth"].Memory_Bound = o["Memory_Bound"]
o["Cache_Memory_Bandwidth"].Lock_Latency = o["Lock_Latency"]
o["Cache_Memory_Bandwidth"].MEM_Latency = o["MEM_Latency"]
o["Cache_Memory_Bandwidth"].Store_Bound = o["Store_Bound"]
o["Cache_Memory_Bandwidth"].Split_Loads = o["Split_Loads"]
o["Cache_Memory_Bandwidth"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Cache_Memory_Bandwidth"].DTLB_Load = o["DTLB_Load"]
o["Cache_Memory_Bandwidth"].L3_Bound = o["L3_Bound"]
o["Cache_Memory_Bandwidth"].FB_Full = o["FB_Full"]
o["Cache_Memory_Bandwidth"].Contested_Accesses = o["Contested_Accesses"]
o["Cache_Memory_Bandwidth"].DRAM_Bound = o["DRAM_Bound"]
o["Cache_Memory_Latency"].L1_Bound = o["L1_Bound"]
o["Cache_Memory_Latency"].DTLB_Load = o["DTLB_Load"]
o["Cache_Memory_Latency"].False_Sharing = o["False_Sharing"]
o["Cache_Memory_Latency"].Data_Sharing = o["Data_Sharing"]
o["Cache_Memory_Latency"].L2_Bound = o["L2_Bound"]
o["Cache_Memory_Latency"].Memory_Bound = o["Memory_Bound"]
o["Cache_Memory_Latency"].DTLB_Store = o["DTLB_Store"]
o["Cache_Memory_Latency"].SQ_Full = o["SQ_Full"]
o["Cache_Memory_Latency"].Store_Bound = o["Store_Bound"]
o["Cache_Memory_Latency"].Split_Loads = o["Split_Loads"]
o["Cache_Memory_Latency"].L3_Bound = o["L3_Bound"]
o["Cache_Memory_Latency"].FB_Full = o["FB_Full"]
o["Cache_Memory_Latency"].Streaming_Stores = o["Streaming_Stores"]
o["Cache_Memory_Latency"].Contested_Accesses = o["Contested_Accesses"]
o["Cache_Memory_Latency"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Cache_Memory_Latency"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Cache_Memory_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Cache_Memory_Latency"].Store_Latency = o["Store_Latency"]
o["Cache_Memory_Latency"].Split_Stores = o["Split_Stores"]
o["Cache_Memory_Latency"].Lock_Latency = o["Lock_Latency"]
o["Cache_Memory_Latency"].MEM_Latency = o["MEM_Latency"]
o["Cache_Memory_Latency"].Store_STLB_Miss = o["Store_STLB_Miss"]
o["Cache_Memory_Latency"].Load_STLB_Miss = o["Load_STLB_Miss"]
o["Cache_Memory_Latency"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Cache_Memory_Latency"].DRAM_Bound = o["DRAM_Bound"]
o["Memory_Data_TLBs"].L1_Bound = o["L1_Bound"]
o["Memory_Data_TLBs"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Memory_Data_TLBs"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Memory_Data_TLBs"].DTLB_Load = o["DTLB_Load"]
o["Memory_Data_TLBs"].Store_Latency = o["Store_Latency"]
o["Memory_Data_TLBs"].Load_STLB_Miss = o["Load_STLB_Miss"]
o["Memory_Data_TLBs"].False_Sharing = o["False_Sharing"]
o["Memory_Data_TLBs"].DTLB_Store = o["DTLB_Store"]
o["Memory_Data_TLBs"].L2_Bound = o["L2_Bound"]
o["Memory_Data_TLBs"].Memory_Bound = o["Memory_Bound"]
o["Memory_Data_TLBs"].Lock_Latency = o["Lock_Latency"]
o["Memory_Data_TLBs"].Store_STLB_Miss = o["Store_STLB_Miss"]
o["Memory_Data_TLBs"].Store_Bound = o["Store_Bound"]
o["Memory_Data_TLBs"].Split_Stores = o["Split_Stores"]
o["Memory_Data_TLBs"].Split_Loads = o["Split_Loads"]
o["Memory_Data_TLBs"].L3_Bound = o["L3_Bound"]
o["Memory_Data_TLBs"].FB_Full = o["FB_Full"]
o["Memory_Data_TLBs"].Streaming_Stores = o["Streaming_Stores"]
o["Memory_Data_TLBs"].DRAM_Bound = o["DRAM_Bound"]
o["Memory_Synchronization"].L1_Bound = o["L1_Bound"]
o["Memory_Synchronization"].Frontend_Bound = o["Frontend_Bound"]
o["Memory_Synchronization"].False_Sharing = o["False_Sharing"]
o["Memory_Synchronization"].Retiring = o["Retiring"]
o["Memory_Synchronization"].Bad_Speculation = o["Bad_Speculation"]
o["Memory_Synchronization"].Machine_Clears = o["Machine_Clears"]
o["Memory_Synchronization"].Data_Sharing = o["Data_Sharing"]
o["Memory_Synchronization"].Memory_Bound = o["Memory_Bound"]
o["Memory_Synchronization"].SQ_Full = o["SQ_Full"]
o["Memory_Synchronization"].Store_Bound = o["Store_Bound"]
o["Memory_Synchronization"].L3_Bound = o["L3_Bound"]
o["Memory_Synchronization"].L2_Bound = o["L2_Bound"]
o["Memory_Synchronization"].Streaming_Stores = o["Streaming_Stores"]
o["Memory_Synchronization"].Contested_Accesses = o["Contested_Accesses"]
o["Memory_Synchronization"].DTLB_Store = o["DTLB_Store"]
o["Memory_Synchronization"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Memory_Synchronization"].Store_Latency = o["Store_Latency"]
o["Memory_Synchronization"].Split_Stores = o["Split_Stores"]
o["Memory_Synchronization"].Store_STLB_Miss = o["Store_STLB_Miss"]
o["Memory_Synchronization"].Backend_Bound = o["Backend_Bound"]
o["Memory_Synchronization"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Memory_Synchronization"].Other_Nukes = o["Other_Nukes"]
o["Memory_Synchronization"].DRAM_Bound = o["DRAM_Bound"]
o["Compute_Bound_Est"].Serializing_Operation = o["Serializing_Operation"]
o["Compute_Bound_Est"].Ports_Utilization = o["Ports_Utilization"]
o["Compute_Bound_Est"].C02_WAIT = o["C02_WAIT"]
o["Compute_Bound_Est"].Retiring = o["Retiring"]
o["Compute_Bound_Est"].Ports_Utilized_2 = o["Ports_Utilized_2"]
o["Compute_Bound_Est"].Memory_Bound = o["Memory_Bound"]
o["Compute_Bound_Est"].Ports_Utilized_1 = o["Ports_Utilized_1"]
o["Compute_Bound_Est"].Core_Bound = o["Core_Bound"]
o["Compute_Bound_Est"].Backend_Bound = o["Backend_Bound"]
o["Compute_Bound_Est"].Ports_Utilized_3m = o["Ports_Utilized_3m"]
o["Compute_Bound_Est"].Divider = o["Divider"]
o["Compute_Bound_Est"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Irregular_Overhead"].Heavy_Operations = o["Heavy_Operations"]
o["Irregular_Overhead"].Ports_Utilization = o["Ports_Utilization"]
o["Irregular_Overhead"].C02_WAIT = o["C02_WAIT"]
o["Irregular_Overhead"].Retiring = o["Retiring"]
o["Irregular_Overhead"].ICache_Misses = o["ICache_Misses"]
o["Irregular_Overhead"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Irregular_Overhead"].Frontend_Bound = o["Frontend_Bound"]
o["Irregular_Overhead"].Serializing_Operation = o["Serializing_Operation"]
o["Irregular_Overhead"].Core_Bound = o["Core_Bound"]
o["Irregular_Overhead"].Bad_Speculation = o["Bad_Speculation"]
o["Irregular_Overhead"].ITLB_Misses = o["ITLB_Misses"]
o["Irregular_Overhead"].Divider = o["Divider"]
o["Irregular_Overhead"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Irregular_Overhead"].Memory_Bound = o["Memory_Bound"]
o["Irregular_Overhead"].Machine_Clears = o["Machine_Clears"]
o["Irregular_Overhead"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Irregular_Overhead"].LCP = o["LCP"]
o["Irregular_Overhead"].Other_Mispredicts = o["Other_Mispredicts"]
o["Irregular_Overhead"].Few_Uops_Instructions = o["Few_Uops_Instructions"]
o["Irregular_Overhead"].DSB_Switches = o["DSB_Switches"]
o["Irregular_Overhead"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Irregular_Overhead"].Assists = o["Assists"]
o["Irregular_Overhead"].Backend_Bound = o["Backend_Bound"]
o["Irregular_Overhead"].Branch_Resteers = o["Branch_Resteers"]
o["Irregular_Overhead"].Clears_Resteers = o["Clears_Resteers"]
o["Irregular_Overhead"].MS_Switches = o["MS_Switches"]
o["Irregular_Overhead"].Other_Nukes = o["Other_Nukes"]
o["Irregular_Overhead"].Unknown_Branches = o["Unknown_Branches"]
o["Irregular_Overhead"].Fetch_Latency = o["Fetch_Latency"]
o["Other_Bottlenecks"].L1_Bound = o["L1_Bound"]
o["Other_Bottlenecks"].C02_WAIT = o["C02_WAIT"]
o["Other_Bottlenecks"].Retiring = o["Retiring"]
o["Other_Bottlenecks"].Data_Sharing = o["Data_Sharing"]
o["Other_Bottlenecks"].L2_Bound = o["L2_Bound"]
o["Other_Bottlenecks"].Core_Bound = o["Core_Bound"]
o["Other_Bottlenecks"].Ports_Utilization = o["Ports_Utilization"]
o["Other_Bottlenecks"].Contested_Accesses = o["Contested_Accesses"]
o["Other_Bottlenecks"].Divider = o["Divider"]
o["Other_Bottlenecks"].L3_Bound = o["L3_Bound"]
o["Other_Bottlenecks"].Ports_Utilized_3m = o["Ports_Utilized_3m"]
o["Other_Bottlenecks"].L1_Hit_Latency = o["L1_Hit_Latency"]
o["Other_Bottlenecks"].FB_Full = o["FB_Full"]
o["Other_Bottlenecks"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Other_Bottlenecks"].Store_Latency = o["Store_Latency"]
o["Other_Bottlenecks"].Other_Mispredicts = o["Other_Mispredicts"]
o["Other_Bottlenecks"].DSB_Switches = o["DSB_Switches"]
o["Other_Bottlenecks"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Other_Bottlenecks"].Ports_Utilized_1 = o["Ports_Utilized_1"]
o["Other_Bottlenecks"].Ports_Utilized_2 = o["Ports_Utilized_2"]
o["Other_Bottlenecks"].Assists = o["Assists"]
o["Other_Bottlenecks"].Backend_Bound = o["Backend_Bound"]
o["Other_Bottlenecks"].Branch_Resteers = o["Branch_Resteers"]
o["Other_Bottlenecks"].Store_STLB_Miss = o["Store_STLB_Miss"]
o["Other_Bottlenecks"].L3_Hit_Latency = o["L3_Hit_Latency"]
o["Other_Bottlenecks"].Split_Stores = o["Split_Stores"]
o["Other_Bottlenecks"].Heavy_Operations = o["Heavy_Operations"]
o["Other_Bottlenecks"].Fetch_Latency = o["Fetch_Latency"]
o["Other_Bottlenecks"].DTLB_Load = o["DTLB_Load"]
o["Other_Bottlenecks"].False_Sharing = o["False_Sharing"]
o["Other_Bottlenecks"].ICache_Misses = o["ICache_Misses"]
o["Other_Bottlenecks"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Other_Bottlenecks"].Frontend_Bound = o["Frontend_Bound"]
o["Other_Bottlenecks"].Machine_Clears = o["Machine_Clears"]
o["Other_Bottlenecks"].Streaming_Stores = o["Streaming_Stores"]
o["Other_Bottlenecks"].Memory_Bound = o["Memory_Bound"]
o["Other_Bottlenecks"].SQ_Full = o["SQ_Full"]
o["Other_Bottlenecks"].Store_Bound = o["Store_Bound"]
o["Other_Bottlenecks"].Split_Loads = o["Split_Loads"]
o["Other_Bottlenecks"].Bad_Speculation = o["Bad_Speculation"]
o["Other_Bottlenecks"].ITLB_Misses = o["ITLB_Misses"]
o["Other_Bottlenecks"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Other_Bottlenecks"].Store_Fwd_Blk = o["Store_Fwd_Blk"]
o["Other_Bottlenecks"].Serializing_Operation = o["Serializing_Operation"]
o["Other_Bottlenecks"].DTLB_Store = o["DTLB_Store"]
o["Other_Bottlenecks"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Other_Bottlenecks"].LCP = o["LCP"]
o["Other_Bottlenecks"].Load_STLB_Miss = o["Load_STLB_Miss"]
o["Other_Bottlenecks"].Few_Uops_Instructions = o["Few_Uops_Instructions"]
o["Other_Bottlenecks"].Lock_Latency = o["Lock_Latency"]
o["Other_Bottlenecks"].MEM_Latency = o["MEM_Latency"]
o["Other_Bottlenecks"].Clears_Resteers = o["Clears_Resteers"]
o["Other_Bottlenecks"].MS_Switches = o["MS_Switches"]
o["Other_Bottlenecks"].Other_Nukes = o["Other_Nukes"]
o["Other_Bottlenecks"].Unknown_Branches = o["Unknown_Branches"]
o["Other_Bottlenecks"].DRAM_Bound = o["DRAM_Bound"]
o["Useful_Work"].Retiring = o["Retiring"]
o["Useful_Work"].Heavy_Operations = o["Heavy_Operations"]
o["Useful_Work"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Useful_Work"].Few_Uops_Instructions = o["Few_Uops_Instructions"]
o["Useful_Work"].Assists = o["Assists"]
o["Core_Bound_Likely"].Memory_Bound = o["Memory_Bound"]
o["Core_Bound_Likely"].Ports_Utilized_0 = o["Ports_Utilized_0"]
o["Core_Bound_Likely"].Core_Bound = o["Core_Bound"]
o["Core_Bound_Likely"].Ports_Utilization = o["Ports_Utilization"]
o["Core_Bound_Likely"].Retiring = o["Retiring"]
o["Core_Bound_Likely"].Backend_Bound = o["Backend_Bound"]
o["UopPI"].Retiring = o["Retiring"]
o["UpTB"].Retiring = o["Retiring"]
o["Retire"].Retiring = o["Retiring"]
o["DSB_Misses"].LSD = o["LSD"]
o["DSB_Misses"].MITE = o["MITE"]
o["DSB_Misses"].LCP = o["LCP"]
o["DSB_Misses"].Fetch_Bandwidth = o["Fetch_Bandwidth"]
o["DSB_Misses"].Frontend_Bound = o["Frontend_Bound"]
o["DSB_Misses"].DSB_Switches = o["DSB_Switches"]
o["DSB_Misses"].Branch_Resteers = o["Branch_Resteers"]
o["DSB_Misses"].ICache_Misses = o["ICache_Misses"]
o["DSB_Misses"].MS_Switches = o["MS_Switches"]
o["DSB_Misses"].ITLB_Misses = o["ITLB_Misses"]
o["DSB_Misses"].DSB = o["DSB"]
o["DSB_Misses"].Unknown_Branches = o["Unknown_Branches"]
o["DSB_Misses"].Fetch_Latency = o["Fetch_Latency"]
o["DSB_Bandwidth"].LSD = o["LSD"]
o["DSB_Bandwidth"].Fetch_Bandwidth = o["Fetch_Bandwidth"]
o["DSB_Bandwidth"].Frontend_Bound = o["Frontend_Bound"]
o["DSB_Bandwidth"].MITE = o["MITE"]
o["DSB_Bandwidth"].DSB = o["DSB"]
o["DSB_Bandwidth"].Fetch_Latency = o["Fetch_Latency"]
o["IC_Misses"].Fetch_Latency = o["Fetch_Latency"]
o["IC_Misses"].LCP = o["LCP"]
o["IC_Misses"].MS_Switches = o["MS_Switches"]
o["IC_Misses"].ICache_Misses = o["ICache_Misses"]
o["IC_Misses"].ITLB_Misses = o["ITLB_Misses"]
o["IC_Misses"].Unknown_Branches = o["Unknown_Branches"]
o["IC_Misses"].DSB_Switches = o["DSB_Switches"]
o["IC_Misses"].Branch_Resteers = o["Branch_Resteers"]
o["Branch_Misprediction_Cost"].Retiring = o["Retiring"]
o["Branch_Misprediction_Cost"].ICache_Misses = o["ICache_Misses"]
o["Branch_Misprediction_Cost"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["Branch_Misprediction_Cost"].Frontend_Bound = o["Frontend_Bound"]
o["Branch_Misprediction_Cost"].Bad_Speculation = o["Bad_Speculation"]
o["Branch_Misprediction_Cost"].ITLB_Misses = o["ITLB_Misses"]
o["Branch_Misprediction_Cost"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Branch_Misprediction_Cost"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Branch_Misprediction_Cost"].LCP = o["LCP"]
o["Branch_Misprediction_Cost"].Other_Mispredicts = o["Other_Mispredicts"]
o["Branch_Misprediction_Cost"].DSB_Switches = o["DSB_Switches"]
o["Branch_Misprediction_Cost"].Backend_Bound = o["Backend_Bound"]
o["Branch_Misprediction_Cost"].Branch_Resteers = o["Branch_Resteers"]
o["Branch_Misprediction_Cost"].MS_Switches = o["MS_Switches"]
o["Branch_Misprediction_Cost"].Unknown_Branches = o["Unknown_Branches"]
o["Branch_Misprediction_Cost"].Fetch_Latency = o["Fetch_Latency"]
# siblings cross-tree
o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],)
o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],)
o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Serializing_Operation"], o["Mixing_Vectors"], o["Microcode_Sequencer"],)
o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],)
o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],)
o["Decoder0_Alone"].sibling = (o["Few_Uops_Instructions"],)
o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],)
o["Machine_Clears"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"], o["Microcode_Sequencer"],)
o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],)
o["DTLB_Load"].sibling = (o["DTLB_Store"],)
o["Lock_Latency"].sibling = (o["Store_Latency"],)
o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"], o["Streaming_Stores"],)
o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["False_Sharing"],)
o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["False_Sharing"],)
o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],)
o["L3_Hit_Latency"].overlap = True
o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],)
o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],)
o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],)
o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],)
o["Store_Latency"].overlap = True
o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"],)
o["Streaming_Stores"].sibling = (o["FB_Full"],)
o["DTLB_Store"].sibling = (o["DTLB_Load"],)
o["Serializing_Operation"].sibling = (o["MS_Switches"],)
o["Mixing_Vectors"].sibling = (o["MS_Switches"],)
o["Ports_Utilized_1"].sibling = (o["L1_Bound"],)
o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["Int_Vector_128b"], o["Int_Vector_256b"],)
o["Int_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_256b"],)
o["Int_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"], o["Int_Vector_128b"],)
o["Few_Uops_Instructions"].sibling = (o["Decoder0_Alone"],)
o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],)
o["Mispredictions"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],)
o["Cache_Memory_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
o["Cache_Memory_Latency"].sibling = (o["L3_Hit_Latency"], o["MEM_Latency"],)
o["Memory_Data_TLBs"].sibling = (o["DTLB_Load"], o["DTLB_Store"],)
o["Memory_Synchronization"].sibling = (o["DTLB_Load"], o["DTLB_Store"],)
o["Irregular_Overhead"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],)
o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Misses"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["Branch_Misprediction_Cost"].sibling = (o["Mispredicts_Resteers"], o["Branch_Mispredicts"],)
o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
| 244,538 | Python | .py | 5,655 | 37.270203 | 1,765 | 0.655489 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,891 | event_download.py | andikleen_pmu-tools/event_download.py | #!/usr/bin/env python3
# Copyright (c) 2014-2020, Intel Corporation
# Author: Andi Kleen
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Automatic event list downloader
#
# event_download.py download for current cpu
# event_download.py -a download all
# event_download.py cpustr... Download for specific CPU
#
# env:
# CPUINFO=... override /proc/cpuinfo file
# MAPFILE=... override mapfile.csv
# PERFMONDIR=... override download prefix for perfmon data, can be a local clone (file:///tmp/perfmon)
from __future__ import print_function
import sys
import re
try:
from urllib.request import urlopen
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen, URLError
import os
import string
from fnmatch import fnmatch
# Base URL of the Intel perfmon event repository; PERFMONDIR may point at a
# local clone (e.g. file:///tmp/perfmon) to avoid network access.
urlpath = os.environ.get('PERFMONDIR', 'https://raw.githubusercontent.com/intel/perfmon/main')
# CSV index mapping CPU model strings to their JSON event files.
mapfile = 'mapfile.csv'
modelpath = urlpath + "/" + mapfile
def get_cpustr():
    """Return the canonical CPU identifier for the running system.

    Parses /proc/cpuinfo (or the file named by the CPUINFO environment
    variable) and returns "vendor-family-model" in the format used by the
    perfmon mapfile, with the model in uppercase hex.  For Skylake-X
    (GenuineIntel family 6 model 0x55) the stepping is appended as well,
    since its event lists differ per stepping.
    """
    cpuinfo = os.getenv("CPUINFO")
    if cpuinfo is None:
        cpuinfo = '/proc/cpuinfo'
    cpu = [None, None, None, None]  # vendor, family, model, stepping
    # Use a context manager so the file is closed even on parse errors
    # (the original leaked the handle).
    with open(cpuinfo, 'r') as f:
        for j in f:
            n = j.split()
            if not n:
                # /proc/cpuinfo separates processors with blank lines;
                # the original code crashed on them if it got that far.
                continue
            if n[0] == 'vendor_id':
                cpu[0] = n[2]
            elif n[0] == 'model' and n[1] == ':':
                cpu[2] = int(n[2])
            elif n[0] == 'cpu' and n[1] == 'family':
                cpu[1] = int(n[3])
            elif n[0] == 'stepping' and n[1] == ':':
                cpu[3] = int(n[2])
            if all(v is not None for v in cpu):
                break
    # stepping for SKX only
    stepping = cpu[0] == "GenuineIntel" and cpu[1] == 6 and cpu[2] == 0x55
    if stepping:
        return "%s-%d-%X-%X" % tuple(cpu)
    return "%s-%d-%X" % tuple(cpu)[:3]
def sanitize(s, a):
    """Return *s* with every character that is not in the allowed set *a* dropped."""
    return "".join(ch for ch in s if ch in a)
def getdir():
    """Return (and create if needed) the event cache directory.

    Uses $XDG_CACHE_HOME/pmu-events if set, otherwise $HOME/.cache/pmu-events.
    Under sudo, falls back to the invoking user's existing cache if present.
    Raises Exception when the directory cannot be created/accessed.
    """
    try:
        d = os.getenv("XDG_CACHE_HOME")
        # remember whether XDG_CACHE_HOME was explicitly set (sudo handling below)
        xd = d
        if not d:
            home = os.getenv("HOME")
            d = "%s/.cache" % (home)
        d += "/pmu-events"
        if not os.path.isdir(d):
            # try to handle the sudo case
            if not xd:
                user = os.getenv("SUDO_USER")
                if user:
                    # prefer the invoking user's already-populated cache
                    nd = os.path.expanduser("~" + user) + "/.cache/pmu-events"
                    if os.path.isdir(nd):
                        return nd
            os.makedirs(d)
        return d
    except OSError:
        raise Exception('Cannot access ' + d)
# Number of download attempts before giving up.
NUM_TRIES = 3

def getfile(url, dirfn, fn):
    """Download *url* into dirfn/fn, retrying up to NUM_TRIES times.

    Retries on IOError (which URLError subclasses); re-raises after the
    last failed attempt.  Progress is printed to stdout, matching the
    original behavior.
    """
    tries = 0
    print("Downloading", url, "to", fn)
    while True:
        try:
            f = urlopen(url)
            try:
                data = f.read()
            finally:
                # Always release the connection, even when read() fails
                # (the original leaked the handle on retries/errors).
                f.close()
        except IOError:
            tries += 1
            if tries >= NUM_TRIES:
                raise
            print("retrying download")
            continue
        break
    # Context manager guarantees the output file is flushed and closed.
    with open(os.path.join(dirfn, fn), "wb") as o:
        o.write(data)
# Messages already emitted by warn_once.
printed = set()

def warn_once(msg):
    """Write *msg* to stderr only the first time it is seen."""
    if msg in printed:
        return
    printed.add(msg)
    print(msg, file=sys.stderr)
def cpu_without_step(match):
    """Strip the stepping component from a vendor-family-model[-stepping] id.

    Identifiers with three or fewer dash-separated parts are returned
    unchanged; otherwise only the first three parts are kept.
    """
    parts = match.split("-")
    if len(parts) <= 3:
        return match
    return "-".join(parts[:3])
# Characters permitted in generated cache file names.
allowed_chars = string.ascii_letters + '_-.' + string.digits

def parse_map_file(match, key=None, link=True, onlyprint=False,
        acceptfile=False, hybridkey=None):
    """Resolve mapfile.csv rows matching *match* and fetch their event files.

    match      -- vendor-family-model[-stepping] string or glob pattern.
    key        -- if set, only rows whose type column is contained in key.
    link       -- also create a symlink under the original perfmon file name.
    onlyprint  -- print resolved file paths instead of downloading.
    acceptfile -- keep already-cached files instead of re-downloading.
    hybridkey  -- if set, only rows whose hybrid core-type column matches.
    Returns the list of event file names handled (empty on errors).
    """
    match2 = cpu_without_step(match)
    files = []
    dirfn = getdir()
    try:
        # MAPFILE env var points at a local mapfile and implies acceptfile.
        mfn = os.getenv("MAPFILE")
        if mfn:
            mapfn = mfn
            acceptfile = True
        else:
            mapfn = os.path.join(dirfn, mapfile)
        if onlyprint and not os.path.exists(mapfn) and not mfn:
            print("Download", mapfn, "first for --print")
            return []
        if acceptfile and os.path.exists(mapfn):
            pass
        elif not onlyprint and not mfn:
            getfile(modelpath, dirfn, mapfile)
        models = open(mapfn)
        for j in models:
            # skip the CSV header line
            if j.startswith("Family-model"):
                continue
            n = j.rstrip().split(",")
            if len(n) < 4:
                if len(n) > 0:
                    print("Cannot parse", n)
                continue
            # version column is unused here
            cpu, version, name, typ = n[:4]
            # match in both directions so either side may be a glob
            if not (fnmatch(cpu, match) or fnmatch(cpu, match2) or
                    fnmatch(match2, cpu) or fnmatch(match, cpu)):
                continue
            if key is not None and typ not in key:
                continue
            if hybridkey and len(n) >= 7 and n[6] != hybridkey:
                continue
            cpu = sanitize(cpu, allowed_chars)
            url = urlpath + name
            matchfn = match
            if matchfn == "*":
                matchfn = cpu
            if ".json" not in matchfn:
                if hybridkey:
                    fn = "%s-%s-%s.json" % (matchfn, sanitize(typ, allowed_chars), hybridkey)
                else:
                    fn = "%s-%s.json" % (matchfn, sanitize(typ, allowed_chars))
            path = os.path.join(dirfn, fn)
            if acceptfile and os.path.exists(path):
                if onlyprint:
                    print(path)
                continue
            else:
                # force a fresh download of any stale cached copy
                try:
                    os.remove(path)
                except OSError:
                    pass
            if onlyprint:
                print(path)
                continue
            if mfn:
                print("error accessing", path)
                continue
            try:
                # collapse stepping-range markers in generated names
                fn = fn.replace("01234", "4")
                fn = fn.replace("56789ABCDEF", "5") # XXX
                getfile(url, dirfn, fn)
            except URLError as e:
                print("error accessing %s: %s" % (url, e), file=sys.stderr)
                if match == '*':
                    continue
                raise
            if link:
                # symlink the cache file under the upstream file name too
                lname = re.sub(r'.*/', '', name)
                lname = sanitize(lname, allowed_chars)
                try:
                    os.remove(os.path.join(dirfn, lname))
                except OSError:
                    pass
                try:
                    os.symlink(fn, os.path.join(dirfn, lname))
                except OSError as e:
                    print("Cannot link %s to %s:" % (name, lname), e, file=sys.stderr)
            files.append(fn)
        models.close()
        # keep the repo's README/LICENSE next to the cached event files
        for file_name in ["README.md", "LICENSE"]:
            if not onlyprint and not os.path.exists(os.path.join(dirfn, file_name)) and not mfn:
                getfile(urlpath + "/" + file_name, dirfn, file_name)
    except URLError as e:
        print("Cannot access event server:", e, file=sys.stderr)
        warn_once("""
If you need a proxy to access the internet please set it with:
\texport https_proxy=http://proxyname...
If you are not connected to the internet please run this on a connected system:
\tevent_download.py '%s'
and then copy ~/.cache/pmu-events to the system under test
To get events for all possible CPUs use:
\tevent_download.py -a""" % match)
    except OSError as e:
        print("Cannot write events file:", e, file=sys.stderr)
    return files
def download(match, key=None, link=True, onlyprint=False, acceptfile=False):
    """Download event files for CPUs matching *match*; return how many were handled."""
    return len(parse_map_file(match, key, link, onlyprint, acceptfile))
def download_current(link=False, onlyprint=False):
    """Download the JSON event list for the current CPU.

    Returns the number of event files handled (>0 when a list was found)."""
    return download(get_cpustr(), link=link, onlyprint=onlyprint, )
def eventlist_name(name=None, key="core", hybridkey=None):
    """Return the path of the cached JSON event list for *name*.

    name defaults to the current CPU identifier.  key selects the event
    list type (e.g. "core"); hybridkey selects the hybrid core type.
    Falls back to parse_map_file lookups (with and without the stepping)
    when no cached file exists; the returned path may not exist on disk.
    """
    if not name:
        name = get_cpustr()
    cache = getdir()
    fn = name
    # an explicit existing path wins
    if os.path.exists(fn):
        return fn
    if ".json" not in name:
        if hybridkey:
            fn = "%s-%s-%s.json" % (name, key, hybridkey)
        else:
            fn = "%s-%s.json" % (name, key)
    if "/" not in fn:
        fn = "%s/%s" % (cache, fn)
    if not os.path.exists(fn):
        files = parse_map_file(name, key, acceptfile=True, hybridkey=hybridkey)
        if files:
            return files[0]
        # retry without the stepping component
        name = cpu_without_step(name)
        if "*" in fn:
            fn = "%s/%s" % (cache, name)
        elif hybridkey:
            fn = "%s/%s-%s-%s.json" % (cache, name, key, hybridkey)
        else:
            fn = "%s/%s-%s.json" % (cache, name, key)
        files = parse_map_file(name, key, acceptfile=True, hybridkey=hybridkey)
        if files:
            fn = files[0]
    return fn
if __name__ == '__main__':
    # only import argparse when actually called from command line
    # this makes ocperf work on older python versions without it.
    import argparse
    p = argparse.ArgumentParser(usage='download Intel event files')
    p.add_argument('--all', '-a', help='Download all available event files', action='store_true')
    p.add_argument('--verbose', '-v', help='Be verbose', action='store_true')
    p.add_argument('--mine', help='Print name of current CPU', action='store_true')
    p.add_argument('--link', help='Create links with the original event file name', action='store_true', default=True)
    p.add_argument('--print', help='Print file names of all event files instead of downloading. Requires existing mapfile.csv.',
            dest='print_', action='store_true')
    p.add_argument('cpus', help='CPU identifiers to download', nargs='*')
    args = p.parse_args()
    if args.verbose or args.mine:
        print(get_cpustr())
    if args.mine:
        # --mine only reports the CPU identifier; nothing to download
        sys.exit(0)
    d = getdir()
    # choose the download scope: everything, current CPU, or the listed CPUs
    if args.all:
        found = download('*', link=args.link, onlyprint=args.print_)
    elif len(args.cpus) == 0:
        found = download_current(link=args.link, onlyprint=args.print_)
    else:
        found = 0
        for j in args.cpus:
            found += download(j, link=args.link, onlyprint=args.print_)
    if found == 0 and not args.print_:
        print("Nothing found", file=sys.stderr)
    el = eventlist_name()
    if os.path.exists(el) and not args.print_:
        print("my event list", el)
| 10,437 | Python | .py | 282 | 27.79078 | 128 | 0.558591 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,892 | power_metrics.py | andikleen_pmu-tools/power_metrics.py | #
# perf power metrics for toplev
#
import os
class EnergyPackage:
    """Package-level RAPL energy consumed over the measurement period."""
    name = "Package Energy"
    desc = """
Package Energy over measurement period in Joules"""
    unit = "Joules"
    subplot = "Power"
    domain = "Package"
    nogroup = True

    def compute(self, EV):
        # presumably EV maps a perf event name to its measured value -- confirm in toplev
        joules = EV("power/energy-pkg/", 1)
        self.val = joules
        self.thresh = joules > 0
class EnergyCores:
    """Core-domain RAPL energy consumed over the measurement period."""
    name = "Cores Energy"
    desc = """
Cores Energy over measurement period in Joules"""
    unit = "Joules"
    subplot = "Power"
    domain = "Package"
    nogroup = True

    def compute(self, EV):
        joules = EV("power/energy-cores/", 1)
        self.val = joules
        self.thresh = joules > 0
class EnergyRAM:
    """DRAM-domain RAPL energy consumed over the measurement period."""
    name = "RAM Energy"
    desc = """
RAM Energy over measurement period in Joules"""
    unit = "Joules"
    subplot = "Power"
    domain = "Package"
    nogroup = True

    def compute(self, EV):
        joules = EV("power/energy-ram/", 1)
        self.val = joules
        self.thresh = joules > 0
class EnergyGPU:
    """GPU-domain RAPL energy consumed over the measurement period."""
    name = "GPU Energy"
    desc = """
GPU Energy over measurement period in Joules"""
    unit = "Joules"
    subplot = "Power"
    domain = "Package"
    nogroup = True

    def compute(self, EV):
        joules = EV("power/energy-gpu/", 1)
        self.val = joules
        # NOTE(review): threshold is > 1 here while the sibling energy
        # metrics use > 0 -- possibly intentional, confirm upstream.
        self.thresh = joules > 1
class Setup:
    """Registers the RAPL energy metrics this kernel exposes with the toplev runner."""
    def __init__(self, r):
        # Probe sysfs for each RAPL perf event and only register metrics
        # whose underlying event actually exists on this system.
        if os.path.exists("/sys/devices/power/events/energy-cores"):
            r.force_metric(EnergyCores())
            # NOTE(review): EnergyPackage is only registered when the
            # energy-cores event exists -- confirm that is intended.
            r.force_metric(EnergyPackage())
        if os.path.exists("/sys/devices/power/events/energy-ram"):
            r.force_metric(EnergyRAM())
        if os.path.exists("/sys/devices/power/events/energy-gpu"):
            r.force_metric(EnergyGPU())
| 1,661 | Python | .py | 57 | 23.684211 | 68 | 0.62015 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,893 | bdw_client_ratios.py | andikleen_pmu-tools/bdw_client_ratios.py | # -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel 5th gen Core / Core M (code named Broadwell)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#
# Helpers
# NOTE(review): these module globals look like placeholder defaults that the
# toplev driver rebinds at runtime (print_error, smt_enabled, ebs_mode,
# num_cores, ...) -- confirm before relying on these values.
print_error = lambda msg: False
smt_enabled = False
ebs_mode = False
version = "4.8-full-perf"
base_frequency = -1.0
Memory = 0
Average_Frequency = 0.0
num_cores = 1
num_threads = 1
num_sockets = 1
def handle_error(obj, msg):
    # Zero out a failed tree node: report once, count the error, clear value/threshold.
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
    obj.thresh = False
def handle_error_metric(obj, msg):
    # Like handle_error but leaves thresh untouched (plain metrics have no threshold).
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
# Constants (fixed cost/width parameters emitted by the TMA generator)
Exe_Ports = 8
Mem_L2_Store_Cost = 9
Mem_L3_Weight = 7
Mem_STLB_Hit_Cost = 8
BAClear_Cost = 12
MS_Switches_Cost = 2
Avg_Assist_Cost = 66
Pipeline_Width = 4
OneMillion = 1000000
OneBillion = 1000000000
Energy_Unit = 61
Errata_Whitelist = "BDE69;BDE70"
EBS_Mode = 0
DS = 0
# Aux. formulas
# EV(name, level) resolves a perf event (or a lambda of events) to its
# measured value; these helpers combine raw events into cycle estimates.
# The SMT variants halve per-core counts to get per-thread values.
def Backend_Bound_Cycles(self, EV, level):
    return (EV("CYCLE_ACTIVITY.STALLS_TOTAL", level) + EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level) - Few_Uops_Executed_Threshold(self, EV, level) - Frontend_RS_Empty_Cycles(self, EV, level) + EV("RESOURCE_STALLS.SB", level))
def Cycles_0_Ports_Utilized(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE:i1:c1", level)) / 2 if smt_enabled else(EV("CYCLE_ACTIVITY.STALLS_TOTAL", level) - Frontend_RS_Empty_Cycles(self, EV, level))
def Cycles_1_Port_Utilized(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE:c1", level) - EV("UOPS_EXECUTED.CORE:c2", level)) / 2 if smt_enabled else(EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level) - EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level))
def Cycles_2_Ports_Utilized(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE:c2", level) - EV("UOPS_EXECUTED.CORE:c3", level)) / 2 if smt_enabled else(EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level) - EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level))
def Cycles_3m_Ports_Utilized(self, EV, level):
    return (EV("UOPS_EXECUTED.CORE:c3", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level)
def DurationTimeInSeconds(self, EV, level):
    """Measurement interval, converted from milliseconds to seconds."""
    interval_ms = EV("interval-ms", 0)
    return interval_ms / 1000
def Execute_Cycles(self, EV, level):
    # cycles with at least one uop executing (per thread under SMT)
    return (EV("UOPS_EXECUTED.CORE:c1", level) / 2) if smt_enabled else EV("UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC", level)
def Fetched_Uops(self, EV, level):
    # total uops delivered by all four frontend paths (DSB, LSD, MITE, MS)
    return (EV("IDQ.DSB_UOPS", level) + EV("LSD.UOPS", level) + EV("IDQ.MITE_UOPS", level) + EV("IDQ.MS_UOPS", level))
def Few_Uops_Executed_Threshold(self, EV, level):
    # the bare EV() calls force both events to be scheduled regardless of
    # which branch of the conditional is taken at evaluation time
    EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level)
    EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level)
    return EV("UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", level) if (IPC(self, EV, level)> 1.8) else EV("UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", level)
# Floating Point computational (arithmetic) Operations Count
def FLOP_Count(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.SCALAR", level) + 2 * EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + 4 * EV("FP_ARITH_INST_RETIRED.4_FLOPS", level) + 8 * EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)
# Scalar FP arithmetic instruction count
def FP_Arith_Scalar(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.SCALAR", level)
# Vector FP arithmetic instruction count
def FP_Arith_Vector(self, EV, level):
    return EV("FP_ARITH_INST_RETIRED.VECTOR", level)
def Frontend_RS_Empty_Cycles(self, EV, level):
    # bare EV() call forces the event to be scheduled in both branches
    EV("RS_EVENTS.EMPTY_CYCLES", level)
    return EV("RS_EVENTS.EMPTY_CYCLES", level) if (self.Fetch_Latency.compute(EV)> 0.1) else 0
def HighIPC(self, EV, level):
    # IPC normalized by the machine's issue width
    val = IPC(self, EV, level) / Pipeline_Width
    return val
def ITLB_Miss_Cycles(self, EV, level):
    """Cycle cost estimate for ITLB misses, as emitted by the TMA generator."""
    stlb_hits = EV("ITLB_MISSES.STLB_HIT", level)
    walk_active = EV("ITLB_MISSES.WALK_DURATION:c1", level)
    walks_done = EV("ITLB_MISSES.WALK_COMPLETED", level)
    return 14 * stlb_hits + walk_active + 7 * walks_done
# Demand-load breakdown helpers: LOAD_L1_MISS* sum the retired-load buckets;
# the LOAD_* variants scale a bucket up by the LFB-hit share.
def LOAD_L1_MISS(self, EV, level):
    return EV("MEM_LOAD_UOPS_RETIRED.L2_HIT", level) + EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) + EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT", level) + EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM", level) + EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS", level)
def LOAD_L1_MISS_NET(self, EV, level):
    return LOAD_L1_MISS(self, EV, level) + EV("MEM_LOAD_UOPS_RETIRED.L3_MISS", level)
def LOAD_L3_HIT(self, EV, level):
    return EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_XSNP_HIT(self, EV, level):
    return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_XSNP_HITM(self, EV, level):
    return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def LOAD_XSNP_MISS(self, EV, level):
    return EV("MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS", level) * (1 + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level) / LOAD_L1_MISS_NET(self, EV, level))
def Mem_L3_Hit_Fraction(self, EV, level):
    # L3 misses are weighted by Mem_L3_Weight in the denominator
    return EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) / (EV("MEM_LOAD_UOPS_RETIRED.L3_HIT", level) + Mem_L3_Weight * EV("MEM_LOAD_UOPS_RETIRED.L3_MISS", level))
def Mem_Lock_St_Fraction(self, EV, level):
    return EV("MEM_UOPS_RETIRED.LOCK_LOADS", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level)
def Memory_Bound_Fraction(self, EV, level):
    return (EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", level) + EV("RESOURCE_STALLS.SB", level)) / Backend_Bound_Cycles(self, EV, level)
def Mispred_Clears_Fraction(self, EV, level):
    return EV("BR_MISP_RETIRED.ALL_BRANCHES", level) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", level) + EV("MACHINE_CLEARS.COUNT", level))
# ORO_* pass a lambda to EV so the min() is taken per measurement interval
def ORO_Demand_RFO_C1(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", level)) , level )
def ORO_DRD_Any_Cycles(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)) , level )
def ORO_DRD_BW_Cycles(self, EV, level):
    return EV(lambda EV , level : min(EV("CPU_CLK_UNHALTED.THREAD", level) , EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c4", level)) , level )
def SQ_Full_Cycles(self, EV, level):
    return (EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level) / 2) if smt_enabled else EV("OFFCORE_REQUESTS_BUFFER.SQ_FULL", level)
def Store_L2_Hit_Cycles(self, EV, level):
    return EV("L2_RQSTS.RFO_HIT", level) * Mem_L2_Store_Cost *(1 - Mem_Lock_St_Fraction(self, EV, level))
def Mem_XSNP_HitM_Cost(self, EV, level):
    """Fixed latency-cost constant used by the generated TMA formulas (snoop HITM)."""
    return 60

def Mem_XSNP_Hit_Cost(self, EV, level):
    """Fixed latency-cost constant used by the generated TMA formulas (snoop hit)."""
    return 43

def Mem_XSNP_None_Cost(self, EV, level):
    """Fixed latency-cost constant used by the generated TMA formulas (no snoop)."""
    return 29
def Recovery_Cycles(self, EV, level):
    # halve the any-thread count to get a per-thread value under SMT
    return (EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2) if smt_enabled else EV("INT_MISC.RECOVERY_CYCLES", level)
def Retire_Fraction(self, EV, level):
    return Retired_Slots(self, EV, level) / EV("UOPS_ISSUED.ANY", level)
# Retired slots per Logical Processor
def Retired_Slots(self, EV, level):
    return EV("UOPS_RETIRED.RETIRE_SLOTS", level)
# Number of logical processors (enabled or online) on the target system
# NOTE(review): hard-coded 4 cores / 8 threads by the generator -- confirm.
def Num_CPUs(self, EV, level):
    return 8 if smt_enabled else 4
# Instructions Per Cycle (per Logical Processor)
def IPC(self, EV, level):
    return EV("INST_RETIRED.ANY", level) / CLKS(self, EV, level)
# Uops Per Instruction
def UopPI(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("INST_RETIRED.ANY", level)
    self.thresh = (val > 1.05)
    return val
# Uops per taken branch
def UpTB(self, EV, level):
    val = Retired_Slots(self, EV, level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    self.thresh = val < Pipeline_Width * 1.5
    return val
# Cycles Per Instruction (per Logical Processor)
def CPI(self, EV, level):
    return 1 / IPC(self, EV, level)
# Per-Logical Processor actual clocks while the Logical Processor is active.
def CLKS(self, EV, level):
    """Unhalted thread clock ticks (CPU_CLK_UNHALTED.THREAD)."""
    return EV("CPU_CLK_UNHALTED.THREAD", level)
# Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)
def SLOTS(self, EV, level):
return Pipeline_Width * CORE_CLKS(self, EV, level)
# The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of "execute" at rename stage.
def Execute_per_Issue(self, EV, level):
return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_ISSUED.ANY", level)
# Instructions Per Cycle across hyper-threads (per physical core)
def CoreIPC(self, EV, level):
return EV("INST_RETIRED.ANY", level) / CORE_CLKS(self, EV, level)
# Floating Point Operations Per Cycle
def FLOPc(self, EV, level):
return FLOP_Count(self, EV, level) / CORE_CLKS(self, EV, level)
# Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to Fused-Multiply Add FMA counting - common.
def FP_Arith_Utilization(self, EV, level):
return (FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level)) / (2 * CORE_CLKS(self, EV, level))
# Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)
def ILP(self, EV, level):
return EV("UOPS_EXECUTED.THREAD", level) / EV("UOPS_EXECUTED.THREAD:c1", level)
# Core actual clocks when any Logical Processor is active on the Physical Core
def CORE_CLKS(self, EV, level):
return ((EV("CPU_CLK_UNHALTED.THREAD", level) / 2) * (1 + EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / EV("CPU_CLK_UNHALTED.REF_XCLK", level))) if ebs_mode else(EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2) if smt_enabled else CLKS(self, EV, level)
# Instructions per Load (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills
def IpLoad(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_LOADS", level)
self.thresh = (val < 3)
return val
# Instructions per Store (lower number means higher occurrence rate). Tip: reduce memory accesses. #Link Opt Guide section: Minimize Register Spills
def IpStore(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("MEM_UOPS_RETIRED.ALL_STORES", level)
self.thresh = (val < 8)
return val
# Instructions per Branch (lower number means higher occurrence rate)
def IpBranch(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.ALL_BRANCHES", level)
self.thresh = (val < 8)
return val
# Instructions per (near) call (lower number means higher occurrence rate)
def IpCall(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_CALL", level)
self.thresh = (val < 200)
return val
# Instructions per taken branch
def IpTB(self, EV, level):
val = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.NEAR_TAKEN", level)
self.thresh = val < Pipeline_Width * 2 + 1
return val
# Branch instructions per taken branch. . Can be used to approximate PGO-likelihood for non-loopy codes.
def BpTkBranch(self, EV, level):
    """Retired branches per taken branch; approximates PGO-likelihood for non-loopy code."""
    all_branches = EV("BR_INST_RETIRED.ALL_BRANCHES", level)
    taken = EV("BR_INST_RETIRED.NEAR_TAKEN", level)
    return all_branches / taken
# Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate). Reference: Tuning Performance via Metrics with Expectations. https://doi.org/10.1109/LCA.2019.2916408
def IpFLOP(self, EV, level):
    """Instructions per floating-point operation; low values indicate FP-dense code."""
    ratio = EV("INST_RETIRED.ANY", level) / FLOP_Count(self, EV, level)
    self.thresh = ratio < 10
    return ratio
# Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.
def IpArith(self, EV, level):
    """Instructions per FP arithmetic instruction; values < 1 possible due to FMA double counting."""
    arith = FP_Arith_Scalar(self, EV, level) + FP_Arith_Vector(self, EV, level)
    ratio = EV("INST_RETIRED.ANY", level) / arith
    self.thresh = ratio < 10
    return ratio
# Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_SP(self, EV, level):
    """Instructions per scalar single-precision FP arithmetic instruction."""
    ratio = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_SINGLE", level)
    self.thresh = ratio < 10
    return ratio
# Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_Scalar_DP(self, EV, level):
    """Instructions per scalar double-precision FP arithmetic instruction."""
    ratio = EV("INST_RETIRED.ANY", level) / EV("FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", level)
    self.thresh = ratio < 10
    return ratio
# Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX128(self, EV, level):
    """Instructions per 128-bit packed FP arithmetic instruction (SP + DP combined)."""
    instructions = EV("INST_RETIRED.ANY", level)
    packed128 = EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", level)
    ratio = instructions / packed128
    self.thresh = ratio < 10
    return ratio
# Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.
def IpArith_AVX256(self, EV, level):
    """Instructions per 256-bit packed FP arithmetic instruction (SP + DP combined)."""
    instructions = EV("INST_RETIRED.ANY", level)
    packed256 = EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", level) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", level)
    ratio = instructions / packed256
    self.thresh = ratio < 10
    return ratio
# Total number of retired Instructions
def Instructions(self, EV, level):
    """Total number of retired instructions."""
    retired = EV("INST_RETIRED.ANY", level)
    return retired
# Average number of Uops retired in cycles where at least one uop has retired.
def Retire(self, EV, level):
    """Average uops retired per cycle in which at least one uop retired."""
    slots = Retired_Slots(self, EV, level)
    return slots / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level)
def Execute(self, EV, level):
    """Average uops executed per cycle with any execution activity."""
    executed = EV("UOPS_EXECUTED.THREAD", level)
    return executed / Execute_Cycles(self, EV, level)
# Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). See section 'Decoded ICache' in Optimization Manual. http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-optimization-manual.html
def DSB_Coverage(self, EV, level):
    """Fraction of fetched uops delivered by the DSB (decoded i-cache / uop cache)."""
    coverage = EV("IDQ.DSB_UOPS", level) / Fetched_Uops(self, EV, level)
    # Only interesting as a bottleneck hint when the workload already retires uops quickly.
    self.thresh = coverage < 0.7 and HighIPC(self, EV, 1)
    return coverage
# Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)
def IpUnknown_Branch(self, EV, level):
    """Instructions per speculative unknown-branch misprediction (BAClear)."""
    baclears = EV("BACLEARS.ANY", level)
    return Instructions(self, EV, level) / baclears
# Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)
def IpMispredict(self, EV, level):
    """Instructions per retired branch misprediction (JEClear); low values flag mispredict-heavy code."""
    ratio = EV("INST_RETIRED.ANY", level) / EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    self.thresh = ratio < 200
    return ratio
# Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).
def IpMisp_Indirect(self, EV, level):
    """Instructions per retired mispredict of indirect CALL/JMP branches."""
    instructions = Instructions(self, EV, level)
    # Scale executed-indirect mispredicts by the retire fraction to approximate retired ones.
    indirect_misp = Retire_Fraction(self, EV, level) * EV("BR_MISP_EXEC.INDIRECT", level)
    ratio = instructions / indirect_misp
    self.thresh = ratio < 1000
    return ratio
# Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)
def Load_Miss_Real_Latency(self, EV, level):
    """Actual average latency (core cycles) of L1D-miss demand load operations."""
    pending_cycles = EV("L1D_PEND_MISS.PENDING", level)
    miss_ops = EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level) + EV("MEM_LOAD_UOPS_RETIRED.HIT_LFB", level)
    return pending_cycles / miss_ops
# Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)
def MLP(self, EV, level):
    """Memory-level parallelism: average outstanding L1 miss demand loads while any miss is pending."""
    pending = EV("L1D_PEND_MISS.PENDING", level)
    pending_cycles = EV("L1D_PEND_MISS.PENDING_CYCLES", level)
    return pending / pending_cycles
# L1 cache true misses per kilo instruction for retired demand loads
def L1MPKI(self, EV, level):
    """L1 data-cache true misses per kilo retired instructions (retired demand loads)."""
    misses = EV("MEM_LOAD_UOPS_RETIRED.L1_MISS", level)
    return 1000 * misses / EV("INST_RETIRED.ANY", level)
# L2 cache true misses per kilo instruction for retired demand loads
def L2MPKI(self, EV, level):
    """L2 cache true misses per kilo retired instructions (retired demand loads)."""
    misses = EV("MEM_LOAD_UOPS_RETIRED.L2_MISS", level)
    return 1000 * misses / EV("INST_RETIRED.ANY", level)
# L2 cache misses per kilo instruction for all request types (including speculative)
def L2MPKI_All(self, EV, level):
    """L2 misses per kilo instructions for all request types, including speculative ones."""
    all_misses = EV("L2_RQSTS.MISS", level)
    return 1000 * all_misses / EV("INST_RETIRED.ANY", level)
# L2 cache misses per kilo instruction for all demand loads (including speculative)
def L2MPKI_Load(self, EV, level):
    """L2 misses per kilo instructions for all demand loads, including speculative ones."""
    load_misses = EV("L2_RQSTS.DEMAND_DATA_RD_MISS", level)
    return 1000 * load_misses / EV("INST_RETIRED.ANY", level)
# Offcore requests (L2 cache miss) per kilo instruction for demand RFOs
def L2MPKI_RFO(self, EV, level):
    """Offcore demand-RFO requests (L2 misses) per kilo retired instructions."""
    rfo = EV("OFFCORE_REQUESTS.DEMAND_RFO", level)
    return 1000 * rfo / EV("INST_RETIRED.ANY", level)
# L2 cache hits per kilo instruction for all request types (including speculative)
def L2HPKI_All(self, EV, level):
    """L2 hits per kilo instructions for all request types (hits = references - misses)."""
    hits = EV("L2_RQSTS.REFERENCES", level) - EV("L2_RQSTS.MISS", level)
    return 1000 * hits / EV("INST_RETIRED.ANY", level)
# L2 cache hits per kilo instruction for all demand loads (including speculative)
def L2HPKI_Load(self, EV, level):
    """L2 hits per kilo instructions for all demand loads, including speculative ones."""
    load_hits = EV("L2_RQSTS.DEMAND_DATA_RD_HIT", level)
    return 1000 * load_hits / EV("INST_RETIRED.ANY", level)
# L3 cache true misses per kilo instruction for retired demand loads
def L3MPKI(self, EV, level):
    """L3 cache true misses per kilo retired instructions (retired demand loads)."""
    misses = EV("MEM_LOAD_UOPS_RETIRED.L3_MISS", level)
    return 1000 * misses / EV("INST_RETIRED.ANY", level)
def L1D_Cache_Fill_BW(self, EV, level):
    """L1D fill bandwidth [GB/s]: 64-byte line replacements over the measured interval."""
    bytes_filled = 64 * EV("L1D.REPLACEMENT", level)
    return bytes_filled / OneBillion / Time(self, EV, level)
def L2_Cache_Fill_BW(self, EV, level):
    """L2 fill bandwidth [GB/s]: 64-byte lines brought into L2 over the measured interval."""
    bytes_filled = 64 * EV("L2_LINES_IN.ALL", level)
    return bytes_filled / OneBillion / Time(self, EV, level)
def L3_Cache_Fill_BW(self, EV, level):
    """L3 fill bandwidth [GB/s]: 64-byte lines missed in LLC over the measured interval."""
    bytes_filled = 64 * EV("LONGEST_LAT_CACHE.MISS", level)
    return bytes_filled / OneBillion / Time(self, EV, level)
# Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses
def Page_Walks_Utilization(self, EV, level):
    """Utilization of the core's page walker(s) serving STLB misses for code, load, and store accesses."""
    walk_cycles = EV("ITLB_MISSES.WALK_DURATION:c1", level) + EV("DTLB_LOAD_MISSES.WALK_DURATION:c1", level) + EV("DTLB_STORE_MISSES.WALK_DURATION:c1", level)
    # Each completed walk is charged an extra fixed cost of 7 cycles in this model.
    completed_walks = EV("DTLB_STORE_MISSES.WALK_COMPLETED", level) + EV("DTLB_LOAD_MISSES.WALK_COMPLETED", level) + EV("ITLB_MISSES.WALK_COMPLETED", level)
    utilization = (walk_cycles + 7 * completed_walks) / CORE_CLKS(self, EV, level)
    self.thresh = utilization > 0.5
    return utilization
# Average per-core data fill bandwidth to the L1 data cache [GB / sec]
def L1D_Cache_Fill_BW_2T(self, EV, level):
    """Average per-core data fill bandwidth to the L1 data cache [GB / sec].

    On this model it is simply the per-thread L1D fill bandwidth.
    """
    return L1D_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L2 cache [GB / sec]
def L2_Cache_Fill_BW_2T(self, EV, level):
    """Average per-core data fill bandwidth to the L2 cache [GB / sec].

    On this model it is simply the per-thread L2 fill bandwidth.
    """
    return L2_Cache_Fill_BW(self, EV, level)
# Average per-core data fill bandwidth to the L3 cache [GB / sec]
def L3_Cache_Fill_BW_2T(self, EV, level):
    """Average per-core data fill bandwidth to the L3 cache [GB / sec].

    On this model it is simply the per-thread L3 fill bandwidth.
    """
    return L3_Cache_Fill_BW(self, EV, level)
# Average Latency for L2 cache miss demand Loads
def Load_L2_Miss_Latency(self, EV, level):
    """Average latency (cycles) for L2-miss demand loads."""
    outstanding = EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level)
    requests = EV("OFFCORE_REQUESTS.DEMAND_DATA_RD", level)
    return outstanding / requests
# Average Parallel L2 cache miss demand Loads
def Load_L2_MLP(self, EV, level):
    """Average number of parallel L2-miss demand loads while any are outstanding."""
    outstanding = EV("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", level)
    busy_cycles = EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", level)
    return outstanding / busy_cycles
# Average Parallel L2 cache miss data reads
def Data_L2_MLP(self, EV, level):
    """Average number of parallel L2-miss data reads while any are outstanding."""
    outstanding = EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", level)
    busy_cycles = EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level)
    return outstanding / busy_cycles
# Average CPU Utilization (percentage)
def CPU_Utilization(self, EV, level):
    """Average CPU utilization as a fraction: utilized CPUs over total CPUs."""
    return CPUs_Utilized(self, EV, level) / Num_CPUs(self, EV, level)
# Average number of utilized CPUs
def CPUs_Utilized(self, EV, level):
    """Average number of utilized CPUs: unhalted reference cycles over the TSC rate."""
    ref_cycles = EV("CPU_CLK_UNHALTED.REF_TSC", level)
    return ref_cycles / EV("msr/tsc/", 0)
# Measured Average Core Frequency for unhalted processors [GHz]
def Core_Frequency(self, EV, level):
    """Measured average core frequency for unhalted processors [GHz]."""
    turbo = Turbo_Utilization(self, EV, level)
    return turbo * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)
# Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width
def GFLOPs(self, EV, level):
    """Giga floating-point operations per second, aggregated over all FP precisions and widths."""
    flops = FLOP_Count(self, EV, level)
    return (flops / OneBillion) / Time(self, EV, level)
# Average Frequency Utilization relative nominal frequency
def Turbo_Utilization(self, EV, level):
    """Average frequency utilization relative to the nominal (reference) frequency."""
    cycles = CLKS(self, EV, level)
    reference = EV("CPU_CLK_UNHALTED.REF_TSC", level)
    return cycles / reference
# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    """Fraction of cycles where both hardware logical processors were active (0 when SMT is off)."""
    if not smt_enabled:
        return 0
    one_thread = EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level)
    return 1 - one_thread / (EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2)
# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    """Fraction of cycles spent in OS kernel (supervisor) mode."""
    kernel_cycles = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level)
    fraction = kernel_cycles / EV("CPU_CLK_UNHALTED.THREAD", level)
    self.thresh = fraction > 0.05
    return fraction
# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    """Cycles per instruction while in OS kernel (supervisor) mode."""
    kernel_cycles = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level)
    kernel_instructions = EV("INST_RETIRED.ANY_P:SUP", level)
    return kernel_cycles / kernel_instructions
# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    """Average external memory bandwidth use for reads and writes [GB / sec]."""
    requests = EV("UNC_ARB_TRK_REQUESTS.ALL", level) + EV("UNC_ARB_COH_TRK_REQUESTS.ALL", level)
    return 64 * requests / OneMillion / Time(self, EV, level) / 1000
# Total package Power in Watts
def Power(self, EV, level):
    """Total package power in Watts over the measurement interval."""
    joules = EV("UNC_PKG_ENERGY_STATUS", level) * Energy_Unit
    return joules / (Time(self, EV, level) * OneMillion)
# Run duration time in seconds
def Time(self, EV, level):
    """Run duration of the measurement interval in seconds; flags sub-second intervals."""
    seconds = EV("interval-s", 0)
    self.thresh = seconds < 1
    return seconds
# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    """Socket uncore clocks while any core on that socket is active."""
    socket_clocks = EV("UNC_CLOCK.SOCKET", level)
    return socket_clocks
# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    """Instructions per far branch (OS transitions, interrupts, exceptions); low values flag heavy OS interaction."""
    ratio = EV("INST_RETIRED.ANY", level) / EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    self.thresh = ratio < 1000000
    return ratio
# Event groups
class Frontend_Bound:
    """TMA level-1 node: fraction of issue slots where the Frontend undersupplied the Backend."""
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    def compute(self, EV):
        # Slots with no uops delivered by the frontend, normalized to total pipeline slots.
        try:
            self.val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the
processor's Frontend undersupplies its Backend. Frontend
denotes the first part of the processor core responsible to
fetch operations that are executed later on by the Backend
part. Within the Frontend; a branch predictor predicts the
next address to fetch; cache-lines are fetched from the
memory subsystem; parsed into instructions; and lastly
decoded into micro-operations (uops). Ideally the Frontend
can issue Pipeline_Width uops every cycle to the Backend.
Frontend Bound denotes unutilized issue-slots when there is
no Backend stall; i.e. bubbles where Frontend delivered no
uops while Backend could have accepted them. For example;
stalls due to instruction-cache misses would be categorized
under Frontend Bound."""
class Fetch_Latency:
    """TMA level-2 node under Frontend_Bound: slots stalled on frontend latency issues."""
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['RS_EVENTS.EMPTY_END']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Cycles with zero uops delivered, scaled to slots by the machine width.
        try:
            self.val = Pipeline_Width * EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend latency issues. For example; instruction-
cache misses; iTLB misses or fetch stalls after a branch
misprediction are categorized under Frontend Latency. In
such cases; the Frontend eventually delivers no uops for
some period."""
class ICache_Misses:
    """TMA level-3 node: cycles stalled on instruction-cache misses."""
    name = "ICache_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'IcMiss'])
    maxval = None
    def compute(self, EV):
        # Instruction-fetch data stall cycles over total thread clocks.
        try:
            self.val = EV("ICACHE.IFDATA_STALL", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ICache_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to instruction cache misses.. Using compiler's
Profile-Guided Optimization (PGO) can reduce i-cache misses
through improved hot code layout."""
class ITLB_Misses:
    """TMA level-3 node: cycles stalled on instruction TLB misses."""
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['ITLB_MISSES.WALK_COMPLETED']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    def compute(self, EV):
        # ITLB miss cycles (helper) over total thread clocks.
        try:
            self.val = ITLB_Miss_Cycles(self, EV, 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Instruction TLB (ITLB) misses.. Consider
large 2M pages for code (selectively prefer hot large-size
function, due to limited 2M entries). Linux options:
standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public
ations/optimizing-function-placement-for-large-scale-data-
center-applications-2/"""
class Branch_Resteers:
    """TMA level-3 node: cycles stalled on branch resteers (fetching from a corrected path)."""
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        # Fixed resteer cost per mispredict, machine clear, or BAClear, over thread clocks.
        try:
            self.val = BAClear_Cost *(EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) + EV("MACHINE_CLEARS.COUNT", 3) + EV("BACLEARS.ANY", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers. Branch Resteers estimates
the Frontend delay in fetching operations from corrected
path; following all sorts of miss-predicted branches. For
example; branchy code with lots of miss-predictions might
get categorized under Branch Resteers. Note the value of
this node may overlap with its siblings."""
class Mispredicts_Resteers:
    """TMA level-4 node: resteer cycles attributed to branch mispredictions."""
    name = "Mispredicts_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP'])
    maxval = None
    def compute(self, EV):
        # Apportion the parent Branch_Resteers cost by the mispredict share of resteer causes.
        try:
            self.val = EV("BR_MISP_RETIRED.ALL_BRANCHES", 4) * self.Branch_Resteers.compute(EV) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", 4) + EV("MACHINE_CLEARS.COUNT", 4) + EV("BACLEARS.ANY", 4))
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Mispredicts_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Branch
Misprediction at execution stage."""
class Clears_Resteers:
    """TMA level-4 node: resteer cycles attributed to machine clears."""
    name = "Clears_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'MachineClears'])
    maxval = None
    def compute(self, EV):
        # Apportion the parent Branch_Resteers cost by the machine-clear share of resteer causes.
        try:
            self.val = EV("MACHINE_CLEARS.COUNT", 4) * self.Branch_Resteers.compute(EV) / (EV("BR_MISP_RETIRED.ALL_BRANCHES", 4) + EV("MACHINE_CLEARS.COUNT", 4) + EV("BACLEARS.ANY", 4))
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Clears_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers as a result of Machine
Clears."""
class Unknown_Branches:
    """TMA level-4 node: cycles stalled on new branch address clears (BAClears)."""
    name = "Unknown_Branches"
    domain = "Clocks"
    area = "FE"
    level = 4
    htoff = False
    sample = ['BACLEARS.ANY']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        # Residual of Branch_Resteers not attributed to mispredicts or machine clears.
        try:
            self.val = self.Branch_Resteers.compute(EV) - self.Mispredicts_Resteers.compute(EV) - self.Clears_Resteers.compute(EV)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Unknown_Branches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to new branch address clears. These are fetched
branches the Branch Prediction Unit was unable to recognize
(e.g. first time the branch is fetched or hitting BTB
capacity limit) hence called Unknown Branches"""
class MS_Switches:
    """TMA level-3 node: estimated cycles lost to microcode-sequencer switches."""
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        # Fixed per-switch penalty times switch count, over total thread clocks.
        try:
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU
was stalled due to switches of uop delivery to the Microcode
Sequencer (MS). Commonly used instructions are optimized for
delivery by the DSB (decoded i-cache) or MITE (legacy
instruction decode) pipelines. Certain operations cannot be
handled natively by the execution pipeline; and must be
performed by microcode (small programs injected into the
execution stream). Switching to the MS too often can
negatively impact performance. The MS is designated to
deliver long uop flows required by CISC instructions like
CPUID; or uncommon conditions like Floating Point Assists
when dealing with Denormals."""
class LCP:
    """TMA level-3 node: cycles stalled on length-changing prefixes during instruction decode."""
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        # LCP-induced instruction-length-decoder stall cycles over total thread clocks.
        try:
            self.val = EV("ILD_STALL.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled
due to Length Changing Prefixes (LCPs). Using proper
compiler flags or Intel Compiler by default will certainly
avoid this."""
class DSB_Switches:
    """TMA level-3 node: cycles lost to DSB-to-MITE pipeline switch penalties."""
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        # Measured switch-penalty cycles over total thread clocks.
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to switches from DSB to MITE pipelines. The DSB
(decoded i-cache) is a Uop Cache where the front-end
directly delivers Uops (micro operations) avoiding heavy x86
decoding. The DSB pipeline has shorter latency and delivered
higher bandwidth than the MITE (legacy instruction decode
pipeline). Switching between the two pipelines can cause
penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class Fetch_Bandwidth:
    """TMA level-2 node: frontend bandwidth slots, the residual of Frontend_Bound minus Fetch_Latency."""
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Whatever part of Frontend_Bound is not latency is, by definition, bandwidth.
        try:
            self.val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend bandwidth issues. For example;
inefficiencies at the instruction decoders; or restrictions
for caching in the DSB (decoded uops cache) are categorized
under Fetch Bandwidth. In such cases; the Frontend typically
delivers suboptimal amount of uops to the Backend."""
class MITE:
    """TMA level-3 node: estimated slots limited by the legacy (MITE) decode pipeline."""
    name = "MITE"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        # MITE-active cycles that delivered fewer than 4 uops, over core clocks (halved per thread).
        try:
            self.val = (EV("IDQ.ALL_MITE_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_MITE_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MITE zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to the MITE pipeline (the legacy
decode pipeline). This pipeline is used for code that was
not pre-cached in the DSB or LSD. For example;
inefficiencies due to asymmetric decoders; use of long
immediate or LCP can manifest as MITE fetch bandwidth
bottleneck.. Consider tuning codegen of 'small hotspots'
that can fit in DSB. Read about 'Decoded ICache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class DSB:
    """TMA level-3 node: estimated slots limited by the DSB (decoded uop cache) fetch pipeline."""
    name = "DSB"
    domain = "Slots_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSB', 'FetchBW'])
    maxval = None
    def compute(self, EV):
        # DSB-active cycles that delivered fewer than 4 uops, over core clocks (halved per thread).
        try:
            self.val = (EV("IDQ.ALL_DSB_CYCLES_ANY_UOPS", 3) - EV("IDQ.ALL_DSB_CYCLES_4_UOPS", 3)) / CORE_CLKS(self, EV, 3) / 2
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles in which CPU
was likely limited due to DSB (decoded uop cache) fetch
pipeline. For example; inefficient utilization of the DSB
cache structure or bank conflict when reading from it; are
categorized here."""
class Bad_Speculation:
    """TMA level-1 node: slots wasted on incorrect speculation (non-retired uops plus recovery)."""
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    def compute(self, EV):
        # Issued-but-not-retired uops plus width-scaled recovery cycles, over total slots.
        try:
            self.val = (EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""
class Branch_Mispredicts:
    """TMA level-2 node: slots wasted on branch mispredictions."""
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Mispredict share of clears (helper) applied to the parent Bad_Speculation cost.
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction. These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Machine_Clears:
    """TMA level-2 node: slots wasted on machine clears, the residual of Bad_Speculation."""
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Whatever part of Bad_Speculation is not mispredicts is attributed to clears.
        try:
            self.val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears. These slots are either wasted by
uops fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:. https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""
class Backend_Bound:
    """TMA level-1 node: slots stalled in the backend, computed as the residual of the other level-1 nodes."""
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        # The four level-1 categories sum to 1; Backend_Bound is what the other three leave over.
        try:
            self.val = 1 -(self.Frontend_Bound.compute(EV) + self.Bad_Speculation.compute(EV) + self.Retiring.compute(EV))
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. Backend is the portion of
the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""
class Memory_Bound:
    """TMA level-2 node: slots where the memory subsystem was the backend bottleneck."""
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Memory share of backend stalls (helper) applied to the parent Backend_Bound cost.
        try:
            self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck. Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""
class L1_Bound:
    """TMA level-3 node: stall cycles without loads missing the L1 data cache."""
    name = "L1_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L1_HIT:pp', 'MEM_LOAD_UOPS_RETIRED.HIT_LFB:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        # Memory stalls minus L1D-miss stalls, clamped at zero to absorb counter skew.
        try:
            self.val = max((EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3) - EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", 3)) / CLKS(self, EV, 3) , 0 )
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L1_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled without
loads missing the L1 data cache. The L1 data cache
typically has the shortest latency. However; in certain
cases like loads blocked on older stores; a load might
suffer due to high latency even though it is being satisfied
by the L1. Another example is loads who miss in the TLB.
These cases are characterized by execution unit stalls;
while some non-completed demand load lives in the machine
without having that demand load missing the L1 cache."""
class DTLB_Load:
    """TMA level-4 node: estimated cycles spent on load DTLB misses (STLB hits plus page walks)."""
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        # STLB-hit cost plus walk-duration cycles plus a 7-cycle charge per completed walk, over clocks.
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT", 4) + EV("DTLB_LOAD_MISSES.WALK_DURATION:c1", 4) + 7 * EV("DTLB_LOAD_MISSES.WALK_COMPLETED", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages). This includes hitting in the second-level TLB (STLB)
as well as performing a hardware page walk on an STLB miss.."""
class Store_Fwd_Blk:
    """TMA level-4 node: estimated cycles lost to blocked store-to-load forwarding."""
    name = "Store_Fwd_Blk"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        # Fixed 13-cycle penalty per blocked forward, over total thread clocks.
        try:
            self.val = 13 * EV("LD_BLOCKS.STORE_FORWARD", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Fwd_Blk zero division")
        return self.val
    desc = """
This metric roughly estimates fraction of cycles when the
memory subsystem had loads blocked since they could not
forward data from earlier (in program order) overlapping
stores. To streamline memory operations in the pipeline; a
load can avoid waiting for memory if a prior in-flight store
is writing the data that the load wants to read (store
forwarding process). However; in some cases the load may be
blocked for a significant time pending the store forward.
For example; when the prior store is writing a smaller
region than the load is reading."""
class Lock_Latency:
    """TMA level-4 node: cycles spent handling cache misses caused by lock operations."""
    name = "Lock_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.LOCK_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Offcore'])
    maxval = 1.0
    def compute(self, EV):
        # Lock share of stores (helper) times outstanding-RFO cycles, over total thread clocks.
        try:
            self.val = Mem_Lock_St_Fraction(self, EV, 4) * ORO_Demand_RFO_C1(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Lock_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU spent
handling cache misses due to lock operations. Due to the
microarchitecture handling of locks; they are classified as
L1_Bound regardless of what memory source satisfied them."""
class Split_Loads:
    """TMA level-4 node: estimated cycles handling loads that split across a cache-line boundary."""
    name = "Split_Loads"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.SPLIT_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = 1.0
    def compute(self, EV):
        # Average miss latency (helper) per split-register-blocked load, over total thread clocks.
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("LD_BLOCKS.NO_SR", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Loads zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles handling memory
load split accesses - load that cross 64-byte cache line
boundary. . Consider aligning data or hot structure fields.
See the Optimization Manual for more details"""
# TMA node (level 4, BE/Mem): loads aliased by earlier stores at a 4K address offset.
# Class name carries a 'G' prefix because Python identifiers cannot begin with a digit;
# the framework-visible name is still "4K_Aliasing".
class G4K_Aliasing:
    name = "4K_Aliasing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # One re-issue event counted per cycle of penalty (rough estimate).
            self.val = EV("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "G4K_Aliasing zero division")
        return self.val
    desc = """
This metric estimates how often memory load accesses were
aliased by preceding stores (in program order) with a 4K
address offset. False match is possible; which incur a few
cycles load re-issue. However; the short re-issue duration
is often hidden by the out-of-order core and HW
optimizations; hence a user may safely ignore a high value
of this metric unless it manages to propagate up into parent
nodes of the hierarchy (e.g. to L1_Bound).. Consider
reducing independent loads/stores accesses with 4K offsets.
See the Optimization Manual for more details"""
# TMA node (level 4, BE/Mem): L1D fill-buffer unavailability limiting miss requests.
class FB_Full:
    name = "FB_Full"
    domain = "Clocks_Calculated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW'])
    maxval = None
    def compute(self, EV):
        try:
            # :c1 counts cycles with at least one FB_FULL event (cmask=1).
            self.val = Load_Miss_Real_Latency(self, EV, 4) * EV("L1D_PEND_MISS.FB_FULL:c1", 4) / CLKS(self, EV, 4)
            # Not parent-gated: flagged on its own value alone.
            self.thresh = (self.val > 0.3)
        except ZeroDivisionError:
            handle_error(self, "FB_Full zero division")
        return self.val
    desc = """
This metric does a *rough estimation* of how often L1D Fill
Buffer unavailability limited additional L1D miss memory
access requests to proceed. The higher the metric value; the
deeper the memory hierarchy level the misses are satisfied
from (metric values >1 are valid). Often it hints on
approaching bandwidth limits (to L2 cache; L3 cache or
external memory).. See $issueBW and $issueSL hints. Avoid
software prefetches if indeed memory BW limited."""
# TMA node (level 3, BE/Mem): stalls due to L2 cache accesses by loads.
class L2_Bound:
    name = "L2_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L2_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            # L1-miss stalls minus L2-miss stalls = stalls satisfied by L2.
            self.val = (EV("CYCLE_ACTIVITY.STALLS_L1D_MISS", 3) - EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L2_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
L2 cache accesses by loads. Avoiding cache misses (i.e. L1
misses/L2 hits) can improve the latency and increase
performance."""
# TMA node (level 3, BE/Mem): stalls due to L3 cache hits (or sibling-core contention).
class L3_Bound:
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            # L2-miss stalls apportioned by the fraction of L3 hits among L3 accesses.
            self.val = Mem_L3_Hit_Fraction(self, EV, 3) * EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
loads accesses to L3 cache or contended with a sibling Core.
Avoiding cache misses (i.e. L2 misses/L3 hits) can improve
the latency and increase performance."""
# TMA node (level 4, BE/Mem): cycles handling contested (cross-core) accesses.
class Contested_Accesses:
    name = "Contested_Accesses"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM:pp', 'MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Costed sum of snoop-HitM and snoop-miss loads, normalized to clocks.
            self.val = (Mem_XSNP_HitM_Cost(self, EV, 4) * LOAD_XSNP_HITM(self, EV, 4) + Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_MISS(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Contested_Accesses zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to contested
accesses. Contested accesses occur when data written by one
Logical Processor are read by another Logical Processor on a
different Physical Core. Examples of contested accesses
include synchronizations such as locks; true data sharing
such as modified locked variables; and false sharing."""
# TMA node (level 4, BE/Mem): cycles handling clean data-sharing snoops.
class Data_Sharing:
    name = "Data_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Snoop-hit cost applied to cross-snoop-hit loads, normalized to clocks.
            self.val = Mem_XSNP_Hit_Cost(self, EV, 4) * LOAD_XSNP_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Data_Sharing zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles while the memory
subsystem was handling synchronizations due to data-sharing
accesses. Data shared by multiple Logical Processors (even
just read shared) may cause increased access latency due to
cache coherency. Excessive data sharing can drastically harm
multithreaded performance."""
# TMA node (level 4, BE/Mem): cycles of demand loads hitting L3 (latency-limited case).
class L3_Hit_Latency:
    name = "L3_Hit_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L3_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # No-snoop L3 hit cost applied to L3-hit loads, normalized to clocks.
            self.val = Mem_XSNP_None_Cost(self, EV, 4) * LOAD_L3_HIT(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Hit_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles with demand load
accesses that hit the L3 cache under unloaded scenarios
(possibly L3 latency limited). Avoiding private cache
misses (i.e. L2 misses/L3 hits) will improve the latency;
reduce contention with sibling physical cores and increase
performance. Note the value of this node may overlap with
its siblings."""
# TMA node (level 4, BE/Mem): fraction of cycles the Super Queue was full.
class SQ_Full:
    name = "SQ_Full"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            # Normalized to CORE_CLKS since the SQ is shared by both SMT threads.
            self.val = SQ_Full_Cycles(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.3) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "SQ_Full zero division")
        return self.val
    desc = """
This metric measures fraction of cycles where the Super
Queue (SQ) was full taking into account all request-types
and both hardware SMT threads (Logical Processors)."""
# TMA node (level 3, BE/Mem): stalls on external memory (DRAM) accesses by loads.
class DRAM_Bound:
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.L3_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Complement of L3_Bound: L2-miss stalls weighted by the L3-miss fraction.
            self.val = (1 - Mem_L3_Hit_Fraction(self, EV, 3)) * EV("CYCLE_ACTIVITY.STALLS_L2_MISS", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled on
accesses to external memory (DRAM) by loads. Better caching
can improve the latency and increase performance."""
# TMA node (level 4, BE/Mem): cycles likely limited by external-memory bandwidth.
class MEM_Bandwidth:
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            # Cycles with many outstanding demand data reads (bandwidth-saturation proxy).
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the core's
performance was likely hurt due to approaching bandwidth
limits of external memory - DRAM ([SPR-HBM] and/or HBM).
The underlying heuristic assumes that a similar off-core
traffic is generated by all IA cores. This metric does not
aggregate non-data-read requests by this logical processor;
requests from other IA Logical Processors/Physical
Cores/sockets; or other non-IA devices like GPU; hence the
maximum external memory bandwidth limits may or may not be
approached when this metric is flagged (see Uncore counters
for that).. Improve data accesses to reduce cacheline
transfers from/to memory. Examples: 1) Consume all bytes of
a each cacheline before it is evicted (e.g. reorder
structure elements and split non-hot ones), 2) merge
computed-limited with BW-limited loops, 3) NUMA
optimizations in multi-socket system. Note: software
prefetches will not help BW-limited application.."""
# TMA node (level 4, BE/Mem): cycles likely limited by external-memory latency.
class MEM_Latency:
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            # Any-outstanding-read cycles minus the bandwidth-bound portion.
            # NOTE(review): self.MEM_Bandwidth is not set in this class — presumably
            # wired up as a sibling reference by the surrounding framework.
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the
performance was likely hurt due to latency from external
memory - DRAM ([SPR-HBM] and/or HBM). This metric does not
aggregate requests from other Logical Processors/Physical
Cores/sockets (see Uncore counters for that).. Improve data
accesses or interleave them with compute. Examples: 1) Data
layout re-structuring, 2) Software Prefetches (also through
the compiler).."""
# TMA node (level 3, BE/Mem): stalls attributed to RFO store memory accesses.
class Store_Bound:
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_UOPS_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            # Store-buffer resource-stall cycles as a fraction of clocks.
            self.val = EV("RESOURCE_STALLS.SB", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to RFO
store memory accesses; RFO store issue a read-for-ownership
request before the write. Even though store accesses do not
typically stall out-of-order CPUs; there are few cases where
stores can lead to actual stalls. This metric will be
flagged should RFO stores be a bottleneck."""
# TMA node (level 4, BE/Mem): cycles spent handling L1D store misses.
class Store_Latency:
    name = "Store_Latency"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # L2-hit store cycles plus the non-locked share of demand-RFO cycles.
            self.val = (Store_L2_Hit_Cycles(self, EV, 4) + (1 - Mem_Lock_St_Fraction(self, EV, 4)) * ORO_Demand_RFO_C1(self, EV, 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU spent
handling L1D store misses. Store accesses usually less
impact out-of-order core performance; however; holding
resources for longer time can lead into undesired
implications (e.g. contention on L1D fill-buffer entries -
see FB_Full). Consider to avoid/reduce unnecessary (or
easily load-able/computable) memory store."""
# TMA node (level 4, BE/Mem): cycles handling snoop-HitM RFOs (false-sharing proxy).
class False_Sharing:
    name = "False_Sharing"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM:pp', 'OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMS', 'DataSharing', 'Offcore', 'Snoop'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # HitM cost applied to RFOs that hit a modified line in another core.
            self.val = Mem_XSNP_HitM_Cost(self, EV, 4) * EV("OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM", 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "False_Sharing zero division")
        return self.val
    desc = """
This metric roughly estimates how often CPU was handling
synchronizations due to False Sharing. False Sharing is a
multithreading hiccup; where multiple Logical Processors
contend on different data-elements mapped into the same
cache line. . False Sharing can be easily avoided by padding
to make Logical Processors access different lines."""
# TMA node (level 4, BE/Mem): rate of stores that split a 64-byte cache line.
class Split_Stores:
    name = "Split_Stores"
    domain = "Core_Utilization"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.SPLIT_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # 2x weight per split store, normalized to core clocks.
            self.val = 2 * EV("MEM_UOPS_RETIRED.SPLIT_STORES", 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Split_Stores zero division")
        return self.val
    desc = """
This metric represents rate of split store accesses.
Consider aligning your data to the 64-byte cache line
granularity."""
# TMA node (level 4, BE/Mem): cycles handling first-level DTLB store misses.
class DTLB_Store:
    name = "DTLB_Store"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # STLB-hit cost + page-walk duration + fixed 7-cycle completion penalty
            # per completed walk, normalized to clocks.
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_STORE_MISSES.STLB_HIT", 4) + EV("DTLB_STORE_MISSES.WALK_DURATION:c1", 4) + 7 * EV("DTLB_STORE_MISSES.WALK_COMPLETED", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Store zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles spent
handling first-level data TLB store misses. As with
ordinary data caching; focus on improving data locality and
reducing working-set size to reduce DTLB overhead.
Additionally; consider using profile-guided optimization
(PGO) to collocate frequently-used data on the same page.
Try using larger page sizes for large amounts of frequently-
used data."""
# TMA node (level 2, BE/Core): backend-bound slots not attributable to memory.
class Core_Bound:
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        try:
            # Derived as the remainder: Backend_Bound minus Memory_Bound.
            # NOTE(review): sibling node references are presumably attached
            # by the surrounding framework — not defined in this class.
            self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots where Core non-
memory issues were of a bottleneck. Shortage in hardware
compute resources; or dependencies in software's
instructions are both categorized under Core Bound. Hence it
may indicate the machine ran out of an out-of-order
resource; certain execution units are overloaded or
dependencies in program's data- or instruction-flow are
limiting the performance (e.g. FP-chained long-latency
arithmetic operations).. Tip: consider Port Saturation
analysis as next step."""
# TMA node (level 3, BE/Core): fraction of cycles the FP divider unit was active.
class Divider:
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['ARITH.FPU_DIV_ACTIVE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Divider-active cycles normalized to core clocks.
            self.val = EV("ARITH.FPU_DIV_ACTIVE", 3) / CORE_CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the Divider
unit was active. Divide and square root instructions are
performed by the Divider unit and can take considerably
longer latency than integer or Floating Point addition;
subtraction; or multiplication."""
# TMA node (level 3, BE/Core): core-compute limitation cycles (non-divider).
class Ports_Utilization:
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            # Backend-bound cycles minus store-buffer and memory stalls.
            self.val = (Backend_Bound_Cycles(self, EV, 3) - EV("RESOURCE_STALLS.SB", 3) - EV("CYCLE_ACTIVITY.STALLS_MEM_ANY", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU performance
was potentially limited due to Core computation issues (non
divider-related). Two distinct categories can be attributed
into this metric: (1) heavy data-dependency among contiguous
instructions would manifest in this metric - such cases are
often referred to as low Instruction Level Parallelism
(ILP). (2) Contention on some hardware execution unit other
than Divider. For example; when there are too many multiply
operations.. Loop Vectorization -most compilers feature
auto-Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
# TMA node (level 4, BE/Core): cycles with zero uops executed on any port.
class Ports_Utilized_0:
    name = "Ports_Utilized_0"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            # Zero-port-utilized cycles normalized to core clocks.
            self.val = Cycles_0_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_0 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed no
uops on any execution port (Logical Processor cycles since
ICL, Physical Core cycles otherwise). Long-latency
instructions like divides may contribute to this metric..
Check assembly view and Appendix C in Optimization Manual to
find out instructions with say 5 or more cycles latency..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
# TMA node (level 4, BE/Core): cycles with exactly 1 uop executed across all ports.
class Ports_Utilized_1:
    name = "Ports_Utilized_1"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            # Single-port-utilized cycles normalized to core clocks.
            self.val = Cycles_1_Port_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_1 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the CPU
executed total of 1 uop per cycle on all execution ports
(Logical Processor cycles since ICL, Physical Core cycles
otherwise). This can be due to heavy data-dependency among
software instructions; or over oversubscribing a particular
hardware resource. In some other cases with high
1_Port_Utilized and L1_Bound; this metric can point to L1
data-cache latency bottleneck that may not necessarily
manifest with complete execution starvation (due to the
short L1 latency e.g. walking a linked list) - looking at
the assembly can be helpful."""
# TMA node (level 4, BE/Core): cycles with exactly 2 uops executed across all ports.
class Ports_Utilized_2:
    name = "Ports_Utilized_2"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            # Two-port-utilized cycles normalized to core clocks.
            self.val = Cycles_2_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_2 zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total
of 2 uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise).
Loop Vectorization -most compilers feature auto-
Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
# TMA node (level 4, BE/Core): cycles with 3 or more uops executed across all ports.
class Ports_Utilized_3m:
    name = "Ports_Utilized_3m"
    domain = "Clocks"
    area = "BE/Core"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB', 'PortsUtil'])
    maxval = None
    def compute(self, EV):
        try:
            # Three-or-more-port-utilized cycles normalized to core clocks.
            self.val = Cycles_3m_Ports_Utilized(self, EV, 4) / CORE_CLKS(self, EV, 4)
            self.thresh = (self.val > 0.4) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilized_3m zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU executed total
of 3 or more uops per cycle on all execution ports (Logical
Processor cycles since ICL, Physical Core cycles otherwise)."""
# TMA node (level 5, BE/Core): aggregate ALU-port (0/1/5/6) dispatch utilization.
class ALU_Op_Utilization:
    name = "ALU_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Sum over the 4 ALU-capable ports, divided by 4x core clocks for an average.
            self.val = (EV("UOPS_DISPATCHED_PORT.PORT_0", 5) + EV("UOPS_DISPATCHED_PORT.PORT_1", 5) + EV("UOPS_DISPATCHED_PORT.PORT_5", 5) + EV("UOPS_DISPATCHED_PORT.PORT_6", 5)) / (4 * CORE_CLKS(self, EV, 5))
            # Not parent-gated.
            self.thresh = (self.val > 0.4)
        except ZeroDivisionError:
            handle_error(self, "ALU_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution ports for ALU operations."""
# TMA node (level 6, BE/Core): dispatch utilization of execution port 0.
class Port_0:
    name = "Port_0"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_0']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            # Uops dispatched on port 0 per core clock.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_0", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_0 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 0 ALU and 2nd branch"""
# TMA node (level 6, BE/Core): dispatch utilization of execution port 1 (ALU).
class Port_1:
    name = "Port_1"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_1']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Uops dispatched on port 1 per core clock.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_1", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_1 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 1 (ALU)"""
# TMA node (level 6, BE/Core): dispatch utilization of execution port 5 (ALU).
class Port_5:
    name = "Port_5"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_5']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Uops dispatched on port 5 per core clock.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_5", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_5 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 5 ALU. See section
'Handling Port 5 Pressure' in Optimization Manual:.
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
# TMA node (level 6, BE/Core): dispatch utilization of execution port 6 (branch/ALU).
class Port_6:
    name = "Port_6"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_6']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Uops dispatched on port 6 per core clock.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_6", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_6 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 6 Primary Branch and
simple ALU"""
# TMA node (level 5, BE/Core): aggregate load-port dispatch utilization.
class Load_Op_Utilization:
    name = "Load_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Ports 2+3+7 minus port 4 (store-data), averaged over 2 load ports.
            # NOTE(review): the PORT_4 subtraction presumably removes store-address
            # uops double-counted on port 7 — confirm against the generator.
            self.val = (EV("UOPS_DISPATCHED_PORT.PORT_2", 5) + EV("UOPS_DISPATCHED_PORT.PORT_3", 5) + EV("UOPS_DISPATCHED_PORT.PORT_7", 5) - EV("UOPS_DISPATCHED_PORT.PORT_4", 5)) / (2 * CORE_CLKS(self, EV, 5))
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Load_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Load operations"""
# TMA node (level 6, BE/Core): dispatch utilization of port 2 (loads/store-address).
class Port_2:
    name = "Port_2"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_2']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Uops dispatched on port 2 per core clock.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_2", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_2 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 2 Loads and Store-address"""
# TMA node (level 6, BE/Core): dispatch utilization of port 3 (loads/store-address).
class Port_3:
    name = "Port_3"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_3']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Uops dispatched on port 3 per core clock.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_3", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_3 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 3 Loads and Store-address"""
# TMA node (level 5, BE/Core): store-port (port 4) dispatch utilization.
class Store_Op_Utilization:
    name = "Store_Op_Utilization"
    domain = "Core_Execution"
    area = "BE/Core"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Port 4 (store-data) uops per core clock.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 5) / CORE_CLKS(self, EV, 5)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Store_Op_Utilization zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port for Store operations"""
# TMA node (level 6, BE/Core): dispatch utilization of port 4 (store-data).
class Port_4:
    name = "Port_4"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_4']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Uops dispatched on port 4 per core clock.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_4", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_4 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 4 (Store-data)"""
# TMA node (level 6, BE/Core): dispatch utilization of port 7 (simple store-address).
class Port_7:
    name = "Port_7"
    domain = "Core_Clocks"
    area = "BE/Core"
    level = 6
    htoff = False
    sample = ['UOPS_DISPATCHED_PORT.PORT_7']
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Uops dispatched on port 7 per core clock.
            self.val = EV("UOPS_DISPATCHED_PORT.PORT_7", 6) / CORE_CLKS(self, EV, 6)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Port_7 zero division")
        return self.val
    desc = """
This metric represents Core fraction of cycles CPU
dispatched uops on execution port 7 simple Store-address"""
# TMA top-level category (level 1, RET): fraction of slots doing useful retiring work.
class Retiring:
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.RETIRE_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        try:
            # Retired uop slots over total issue slots.
            self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1)
            # Also flagged when the Heavy_Operations child is flagged (OR, not AND).
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """
This category represents fraction of slots utilized by
useful work i.e. issued uops that eventually get retired.
Ideally; all pipeline slots would be attributed to the
Retiring category. Retiring of 100% would indicate the
maximum Pipeline_Width throughput was achieved. Maximizing
Retiring typically increases the Instructions-per-cycle (see
IPC metric). Note that a high Retiring value does not
necessary mean there is no room for more performance. For
example; Heavy-operations or Microcode Assists are
categorized under Retiring. They often indicate suboptimal
performance and can often be optimized or avoided. . A high
Retiring value for non-vectorized code may be a good hint
for programmer to consider vectorizing his code. Doing so
essentially lets more computations be done without
significantly increasing number of instructions thus
improving the performance."""
# TMA node (level 2, RET): slots retiring light-weight (single-uop) operations.
class Light_Operations:
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # Derived as the remainder: Retiring minus Heavy_Operations.
            self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring light-weight operations -- instructions that
require no more than one uop (micro-operation). This
correlates with total number of instructions used by the
program. A uops-per-instruction (see UopPI metric) ratio of
1 or less should be expected for decently optimized code
running on Intel Core/Xeon products. While this often
indicates efficient X86 instructions were executed; high
value does not necessarily mean better performance cannot be
achieved. . Focus on techniques that reduce instruction
count or result in more efficient instructions generation
such as vectorization."""
# TMA node (level 3, RET): overall arithmetic FP uops fraction (sum of children).
class FP_Arith:
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            # Sum of the three child nodes; may exceed the parent (see desc).
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """
This metric represents overall arithmetic floating-point
(FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use of
\"Uops\" CountDomain and FMA double-counting."""
# TMA node (level 4, RET): approximation of legacy x87 instruction usage.
class X87_Use:
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            # x87 instructions scaled to uops (via UopPI) over retired slots.
            self.val = EV("INST_RETIRED.X87", 4) * UopPI(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """
This metric serves as an approximation of legacy x87 usage.
It accounts for instructions beyond X87 FP arithmetic
operations; hence may be used as a thermometer to avoid X87
high usage and preferably upgrade to modern ISA. See Tip
under Tuning Hint.. Tip: consider compiler flags to generate
newer AVX (or SSE) instruction sets; which typically perform
better and feature vectors."""
# TMA node (level 4, RET): scalar FP arithmetic uops fraction.
class FP_Scalar:
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            # Scalar FP uops over retired slots.
            self.val = FP_Arith_Scalar(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
scalar uops fraction the CPU has retired. May overcount due
to FMA double counting.. Investigate what limits (compiler)
generation of vector code."""
class FP_Vector:
    """Generated TMA tree node (level 4, RET area): fraction of retired uops
    that are vector FP arithmetic, aggregated across vector widths."""
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    # Unlike the sibling FP nodes, this one is clamped to 1.0 by the driver.
    maxval = 1.0
    def compute(self, EV):
        try:
            # Vector FP arithmetic uops over total retired slots.
            self.val = FP_Arith_Vector(self, EV, 4) / Retired_Slots(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
vector uops fraction the CPU has retired aggregated across
all vector widths. May overcount due to FMA double
counting.. Check if vector width is expected"""
class FP_Vector_128b:
    """Generated TMA tree node (level 5, RET area): fraction of retired uops
    that are 128-bit packed FP arithmetic (single + double precision)."""
    name = "FP_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Sum of 128-bit packed DP and SP events over retired slots.
            self.val = (EV("FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_128b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 128-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class FP_Vector_256b:
    """Generated TMA tree node (level 5, RET area): fraction of retired uops
    that are 256-bit packed FP arithmetic (single + double precision)."""
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Sum of 256-bit packed DP and SP events over retired slots.
            self.val = (EV("FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", 5) + EV("FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", 5)) / Retired_Slots(self, EV, 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class Heavy_Operations:
    """Generated TMA tree node (level 2, RET area): slots spent retiring
    heavy-weight (multi-uop / microcoded) operations. On this model it is
    computed solely from the Microcode_Sequencer child node."""
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            # Delegates entirely to the child node's value.
            self.val = self.Microcode_Sequencer.compute(EV)
            # Level-2 node: thresh is absolute, not gated on a parent.
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that
require two or more uops or micro-coded sequences. This
highly-correlates with the uop length of these
instructions/sequences."""
class Microcode_Sequencer:
    """Generated TMA tree node (level 3, RET area): fraction of slots retiring
    uops fetched from the Microcode Sequencer (MS) unit."""
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    # Event suggested for sampling when this node is the bottleneck.
    sample = ['IDQ.MS_UOPS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None
    def compute(self, EV):
        try:
            # MS-delivered uops, scaled by the retiring fraction,
            # normalized by total pipeline slots.
            self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was
retiring uops fetched by the Microcode Sequencer (MS) unit.
The MS is used for CISC instructions not supported by the
default decoders (like repeat move strings; or CPUID); or by
microcode assists used to address some operation modes (like
in Floating Point assists). These cases can often be
avoided.."""
class Assists:
    """Generated TMA tree node (level 4, RET area): estimated slots consumed
    by microcode assists, costed at Avg_Assist_Cost slots per assist event."""
    name = "Assists"
    domain = "Slots_Estimated"
    area = "RET"
    level = 4
    htoff = False
    sample = ['OTHER_ASSISTS.ANY_WB_ASSIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvIO'])
    maxval = 1.0
    def compute(self, EV):
        try:
            # Assist events times the model's fixed per-assist slot cost,
            # normalized by total pipeline slots.
            self.val = Avg_Assist_Cost * EV("OTHER_ASSISTS.ANY_WB_ASSIST", 4) / SLOTS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Assists zero division")
        return self.val
    desc = """
This metric estimates fraction of slots the CPU retired uops
delivered by the Microcode_Sequencer as a result of Assists.
Assists are long sequences of uops that are required in
certain corner-cases for operations that cannot be handled
natively by the execution pipeline. For example; when
working with very small floating point values (so-called
Denormals); the FP units are not set up to perform these
operations natively. Instead; a sequence of instructions to
perform the computation on the Denormals is injected into
the pipeline. Since these microcode sequences might be
dozens of uops long; Assists can be extremely deleterious to
performance and they can be avoided in many cases."""
class CISC:
    """Generated TMA tree node (level 4, RET area): MS slots not explained by
    Assists, attributed to multi-uop CISC instructions."""
    name = "CISC"
    domain = "Slots"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset([])
    maxval = None
    def compute(self, EV):
        try:
            # Remainder of the Microcode_Sequencer node after subtracting
            # Assists; clamped at zero since the estimate can go negative.
            self.val = max(0 , self.Microcode_Sequencer.compute(EV) - self.Assists.compute(EV))
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "CISC zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU retired
uops originated from CISC (complex instruction set computer)
instruction. A CISC instruction has multiple uops that are
required to perform the instruction's functionality as in
the case of read-modify-write as an example. Since these
instructions require multiple uops they may or may not imply
sub-optimal use of machine resources."""
# --- Generated informational metrics, "Info.Thread" area. Each Metric_* class
# follows the same template: `compute` delegates to the like-named module-level
# helper, stores the result in self.val, sets self.thresh (True, or a fixed
# highlight condition), and routes ZeroDivisionError to handle_error_metric.
class Metric_IPC:
    """Instructions Per Cycle, per logical processor."""
    name = "IPC"
    domain = "Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Ret', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IPC zero division")
    desc = """
Instructions Per Cycle (per Logical Processor)"""
class Metric_UopPI:
    """Uops per retired instruction; highlighted when above 1.05."""
    name = "UopPI"
    domain = "Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UopPI(self, EV, 0)
            self.thresh = (self.val > 1.05)
        except ZeroDivisionError:
            handle_error_metric(self, "UopPI zero division")
    desc = """
Uops Per Instruction"""
class Metric_UpTB:
    """Uops per taken branch; flagged when below 1.5x the pipeline width."""
    name = "UpTB"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 1.5
        except ZeroDivisionError:
            handle_error_metric(self, "UpTB zero division")
    desc = """
Uops per taken branch"""
class Metric_CPI:
    """Cycles Per Instruction, per logical processor."""
    name = "CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPI zero division")
    desc = """
Cycles Per Instruction (per Logical Processor)"""
class Metric_CLKS:
    """Raw unhalted clock count for the logical processor."""
    name = "CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CLKS zero division")
    desc = """
Per-Logical Processor actual clocks when the Logical
Processor is active."""
class Metric_SLOTS:
    """Total issue-pipeline slots available in the measured interval."""
    name = "SLOTS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SLOTS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SLOTS zero division")
    desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""
class Metric_Execute_per_Issue:
    """Ratio of executed to issued uops (micro-fusion indicator)."""
    name = "Execute_per_Issue"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Cor', 'Pipeline'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute_per_Issue(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute_per_Issue zero division")
    desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests
high rate of uop micro-fusions. Ratio < 1 suggest high rate
of \"execute\" at rename stage."""
# --- Generated informational metrics, "Info.Core" area (per physical core). ---
class Metric_CoreIPC:
    """IPC aggregated across hyper-threads of a physical core."""
    name = "CoreIPC"
    domain = "Core_Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CoreIPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CoreIPC zero division")
    desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""
class Metric_FLOPc:
    """Floating point operations per core cycle."""
    name = "FLOPc"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Ret', 'Flops'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FLOPc(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FLOPc zero division")
    desc = """
Floating Point Operations Per Cycle"""
class Metric_FP_Arith_Utilization:
    """Per-core utilization of the non-x87 FP execution units."""
    name = "FP_Arith_Utilization"
    domain = "Core_Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "FP_Arith_Utilization zero division")
    desc = """
Actual per-core usage of the Floating Point non-X87
execution units (regardless of precision or vector-width).
Values > 1 are possible due to Fused-Multiply Add FMA
counting - common."""
class Metric_ILP:
    """Average uops executed per cycle when execution occurs."""
    name = "ILP"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = ILP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "ILP zero division")
    desc = """
Instruction-Level-Parallelism (average number of uops
executed when there is execution) per thread (logical-
processor)"""
class Metric_CORE_CLKS:
    """Core clock count while any logical processor is active."""
    name = "CORE_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Core"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CORE_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CORE_CLKS zero division")
    desc = """
Core actual clocks when any Logical Processor is active on
the Physical Core"""
# --- Generated informational metrics, "Info.Inst_Mix" area. The Ip* family
# reports instructions-per-occurrence (lower value = higher occurrence rate);
# each thresh highlights unusually frequent occurrence.
class Metric_IpLoad:
    """Instructions per retired load."""
    name = "IpLoad"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpLoad(self, EV, 0)
            self.thresh = (self.val < 3)
        except ZeroDivisionError:
            handle_error_metric(self, "IpLoad zero division")
    desc = """
Instructions per Load (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
class Metric_IpStore:
    """Instructions per retired store."""
    name = "IpStore"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpStore(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpStore zero division")
    desc = """
Instructions per Store (lower number means higher occurrence
rate). Tip: reduce memory accesses."""
class Metric_IpBranch:
    """Instructions per retired branch."""
    name = "IpBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpBranch(self, EV, 0)
            self.thresh = (self.val < 8)
        except ZeroDivisionError:
            handle_error_metric(self, "IpBranch zero division")
    desc = """
Instructions per Branch (lower number means higher
occurrence rate)"""
class Metric_IpCall:
    """Instructions per retired near call."""
    name = "IpCall"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpCall(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpCall zero division")
    desc = """
Instructions per (near) call (lower number means higher
occurrence rate)"""
class Metric_IpTB:
    """Instructions per taken branch."""
    name = "IpTB"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'FetchBW', 'Frontend', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpTB(self, EV, 0)
            self.thresh = self.val < Pipeline_Width * 2 + 1
        except ZeroDivisionError:
            handle_error_metric(self, "IpTB zero division")
    desc = """
Instructions per taken branch"""
class Metric_BpTkBranch:
    """Branch instructions per taken branch."""
    name = "BpTkBranch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Branches', 'Fed', 'PGO'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = BpTkBranch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "BpTkBranch zero division")
    desc = """
Branch instructions per taken branch. . Can be used to
approximate PGO-likelihood for non-loopy codes."""
class Metric_IpFLOP:
    """Instructions per floating point operation."""
    name = "IpFLOP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpFLOP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFLOP zero division")
    desc = """
Instructions per Floating Point (FP) Operation (lower number
means higher occurrence rate). Reference: Tuning Performance
via Metrics with Expectations.
https://doi.org/10.1109/LCA.2019.2916408"""
class Metric_IpArith:
    """Instructions per FP arithmetic instruction."""
    name = "IpArith"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith zero division")
    desc = """
Instructions per FP Arithmetic instruction (lower number
means higher occurrence rate). Values < 1 are possible due
to intentional FMA double counting. Approximated prior to
BDW."""
class Metric_IpArith_Scalar_SP:
    """Instructions per scalar single-precision FP instruction."""
    name = "IpArith_Scalar_SP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_SP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_SP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Single-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
class Metric_IpArith_Scalar_DP:
    """Instructions per scalar double-precision FP instruction."""
    name = "IpArith_Scalar_DP"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpScalar', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_Scalar_DP(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_Scalar_DP zero division")
    desc = """
Instructions per FP Arithmetic Scalar Double-Precision
instruction (lower number means higher occurrence rate).
Values < 1 are possible due to intentional FMA double
counting."""
class Metric_IpArith_AVX128:
    """Instructions per 128-bit AVX/SSE FP arithmetic instruction."""
    name = "IpArith_AVX128"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX128(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX128 zero division")
    desc = """
Instructions per FP Arithmetic AVX/SSE 128-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
class Metric_IpArith_AVX256:
    """Instructions per 256-bit AVX FP arithmetic instruction."""
    name = "IpArith_AVX256"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Flops', 'FpVector', 'InsType'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpArith_AVX256(self, EV, 0)
            self.thresh = (self.val < 10)
        except ZeroDivisionError:
            handle_error_metric(self, "IpArith_AVX256 zero division")
    desc = """
Instructions per FP Arithmetic AVX* 256-bit instruction
(lower number means higher occurrence rate). Values < 1 are
possible due to intentional FMA double counting."""
class Metric_Instructions:
    """Total retired instruction count."""
    name = "Instructions"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.Inst_Mix"
    metricgroup = frozenset(['Summary', 'TmaL1'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Instructions(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Instructions zero division")
    desc = """
Total number of retired Instructions"""
# --- Generated informational metrics, "Info.Pipeline" area. ---
class Metric_Retire:
    """Average uops retired per cycle in which any uop retires."""
    name = "Retire"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Pipeline', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Retire(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Retire zero division")
    desc = """
Average number of Uops retired in cycles where at least one
uop has retired."""
class Metric_Execute:
    # NOTE(review): the generator emitted an empty desc for this metric;
    # by analogy with Metric_ILP it reports the Execute() helper's value
    # (uops executed per execution cycle) -- confirm against the model.
    name = "Execute"
    domain = "Metric"
    maxval = Exe_Ports
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Cor', 'Pipeline', 'PortsUtil', 'SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Execute(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Execute zero division")
    desc = """
"""
# --- Generated informational metrics, "Info.Frontend" area. ---
class Metric_DSB_Coverage:
    """Fraction of uops delivered by the DSB (decoded uop cache); flagged
    when coverage is low while IPC is high."""
    name = "DSB_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSB', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DSB_Coverage(self, EV, 0)
            # Only interesting when the workload already runs at high IPC.
            self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Coverage zero division")
    desc = """
Fraction of Uops delivered by the DSB (aka Decoded ICache;
or Uop Cache). See section 'Decoded ICache' in Optimization
Manual. http://www.intel.com/content/www/us/en/architecture-
and-technology/64-ia-32-architectures-optimization-
manual.html"""
class Metric_IpUnknown_Branch:
    """Instructions per speculative unknown-branch misprediction (BAClear)."""
    name = "IpUnknown_Branch"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['Fed'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpUnknown_Branch(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IpUnknown_Branch zero division")
    desc = """
Instructions per speculative Unknown Branch Misprediction
(BAClear) (lower number means higher occurrence rate)"""
# --- Generated informational metrics, "Info.Bad_Spec" area. ---
class Metric_IpMispredict:
    """Instructions per retired branch misprediction (JEClear)."""
    name = "IpMispredict"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BadSpec', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMispredict(self, EV, 0)
            self.thresh = (self.val < 200)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMispredict zero division")
    desc = """
Number of Instructions per non-speculative Branch
Misprediction (JEClear) (lower number means higher
occurrence rate)"""
class Metric_IpMisp_Indirect:
    """Instructions per mispredicted indirect CALL/JMP."""
    name = "IpMisp_Indirect"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Bad_Spec"
    metricgroup = frozenset(['Bad', 'BrMispredicts'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpMisp_Indirect(self, EV, 0)
            self.thresh = (self.val < 1000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpMisp_Indirect zero division")
    desc = """
Instructions per retired Mispredicts for indirect CALL or
JMP branches (lower number means higher occurrence rate)."""
# --- Generated informational metrics, "Info.Memory" area: cache miss/hit
# rates per kilo instruction (MPKI/HPKI) and fill bandwidths.
class Metric_Load_Miss_Real_Latency:
    """Average latency (core cycles) of L1D-miss demand loads."""
    name = "Load_Miss_Real_Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryLat'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Load_Miss_Real_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_Miss_Real_Latency zero division")
    desc = """
Actual Average Latency for L1 data-cache miss demand load
operations (in core cycles)"""
class Metric_MLP:
    """Memory-level parallelism: average outstanding L1 miss demand loads."""
    name = "MLP"
    domain = "Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBound', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = MLP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MLP zero division")
    desc = """
Memory-Level-Parallelism (average number of L1 miss demand
load when there is at least one such miss. Per-Logical
Processor)"""
class Metric_L1MPKI:
    """L1D true misses per kilo instruction (retired demand loads)."""
    name = "L1MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1MPKI zero division")
    desc = """
L1 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L2MPKI:
    """L2 true misses per kilo instruction (retired demand loads)."""
    name = "L2MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'Backend', 'CacheHits'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI zero division")
    desc = """
L2 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L2MPKI_All:
    """L2 misses per kilo instruction, all request types (incl. speculative)."""
    name = "L2MPKI_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_All zero division")
    desc = """
L2 cache misses per kilo instruction for all request types
(including speculative)"""
class Metric_L2MPKI_Load:
    """L2 misses per kilo instruction for demand loads (incl. speculative)."""
    name = "L2MPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_Load zero division")
    desc = """
L2 cache misses per kilo instruction for all demand loads
(including speculative)"""
class Metric_L2MPKI_RFO:
    """L2-miss offcore requests per kilo instruction for demand RFOs."""
    name = "L2MPKI_RFO"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheMisses', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2MPKI_RFO(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2MPKI_RFO zero division")
    desc = """
Offcore requests (L2 cache miss) per kilo instruction for
demand RFOs"""
class Metric_L2HPKI_All:
    """L2 hits per kilo instruction, all request types (incl. speculative)."""
    name = "L2HPKI_All"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2HPKI_All(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2HPKI_All zero division")
    desc = """
L2 cache hits per kilo instruction for all request types
(including speculative)"""
class Metric_L2HPKI_Load:
    """L2 hits per kilo instruction for demand loads (incl. speculative)."""
    name = "L2HPKI_Load"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['CacheHits', 'Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2HPKI_Load(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2HPKI_Load zero division")
    desc = """
L2 cache hits per kilo instruction for all demand loads
(including speculative)"""
class Metric_L3MPKI:
    """L3 true misses per kilo instruction (retired demand loads)."""
    name = "L3MPKI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3MPKI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3MPKI zero division")
    desc = """
L3 cache true misses per kilo instruction for retired demand
loads"""
class Metric_L1D_Cache_Fill_BW:
    # NOTE(review): empty desc from the generator; per the _2T variant below,
    # presumably per-thread L1D fill bandwidth in GB/s -- confirm.
    name = "L1D_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1D_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1D_Cache_Fill_BW zero division")
    desc = """
"""
class Metric_L2_Cache_Fill_BW:
    # NOTE(review): empty desc from the generator; per the _2T variant below,
    # presumably per-thread L2 fill bandwidth in GB/s -- confirm.
    name = "L2_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2_Cache_Fill_BW zero division")
    desc = """
"""
class Metric_L3_Cache_Fill_BW:
    # NOTE(review): empty desc from the generator; per the _2T variant below,
    # presumably per-thread L3 fill bandwidth in GB/s -- confirm.
    name = "L3_Cache_Fill_BW"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3_Cache_Fill_BW(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Fill_BW zero division")
    desc = """
"""
# --- Generated informational metrics, "Info.Memory.TLB" and
# "Info.Memory.Core" areas (per-core TLB and cache-fill bandwidth). ---
class Metric_Page_Walks_Utilization:
    """Page walker utilization for STLB misses; flagged above 50%."""
    name = "Page_Walks_Utilization"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Memory.TLB"
    metricgroup = frozenset(['Mem', 'MemoryTLB'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Page_Walks_Utilization(self, EV, 0)
            self.thresh = (self.val > 0.5)
        except ZeroDivisionError:
            handle_error_metric(self, "Page_Walks_Utilization zero division")
    desc = """
Utilization of the core's Page Walker(s) serving STLB misses
triggered by instruction/Load/Store accesses"""
class Metric_L1D_Cache_Fill_BW_2T:
    """Per-core L1D fill bandwidth in GB/s."""
    name = "L1D_Cache_Fill_BW_2T"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Core"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L1D_Cache_Fill_BW_2T(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L1D_Cache_Fill_BW_2T zero division")
    desc = """
Average per-core data fill bandwidth to the L1 data cache
[GB / sec]"""
class Metric_L2_Cache_Fill_BW_2T:
    """Per-core L2 fill bandwidth in GB/s."""
    name = "L2_Cache_Fill_BW_2T"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Core"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L2_Cache_Fill_BW_2T(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L2_Cache_Fill_BW_2T zero division")
    desc = """
Average per-core data fill bandwidth to the L2 cache [GB /
sec]"""
class Metric_L3_Cache_Fill_BW_2T:
    """Per-core L3 fill bandwidth in GB/s."""
    name = "L3_Cache_Fill_BW_2T"
    domain = "Core_Metric"
    maxval = 0
    errcount = 0
    area = "Info.Memory.Core"
    metricgroup = frozenset(['Mem', 'MemoryBW'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = L3_Cache_Fill_BW_2T(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "L3_Cache_Fill_BW_2T zero division")
    desc = """
Average per-core data fill bandwidth to the L3 cache [GB /
sec]"""
# --- Generated informational metrics, "Info.Memory.Latency" area. ---
class Metric_Load_L2_Miss_Latency:
    """Average latency of L2-miss demand loads (cycles)."""
    name = "Load_L2_Miss_Latency"
    domain = "Clocks_Latency"
    maxval = 1000
    errcount = 0
    area = "Info.Memory.Latency"
    metricgroup = frozenset(['Memory_Lat', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Load_L2_Miss_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_L2_Miss_Latency zero division")
    desc = """
Average Latency for L2 cache miss demand Loads"""
class Metric_Load_L2_MLP:
    """Average parallel L2-miss demand loads outstanding."""
    name = "Load_L2_MLP"
    domain = "Metric"
    maxval = 100
    errcount = 0
    area = "Info.Memory.Latency"
    metricgroup = frozenset(['Memory_BW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Load_L2_MLP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Load_L2_MLP zero division")
    desc = """
Average Parallel L2 cache miss demand Loads"""
class Metric_Data_L2_MLP:
    """Average parallel L2-miss data reads outstanding."""
    name = "Data_L2_MLP"
    domain = "Metric"
    maxval = 100
    errcount = 0
    area = "Info.Memory.Latency"
    metricgroup = frozenset(['Memory_BW', 'Offcore'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Data_L2_MLP(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Data_L2_MLP zero division")
    desc = """
Average Parallel L2 cache miss data reads"""
class Metric_CPU_Utilization:
name = "CPU_Utilization"
domain = "Metric"
maxval = 1
errcount = 0
area = "Info.System"
metricgroup = frozenset(['HPC', 'Summary'])
sibling = None
def compute(self, EV):
try:
self.val = CPU_Utilization(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CPU_Utilization zero division")
desc = """
Average CPU Utilization (percentage)"""
class Metric_CPUs_Utilized:
name = "CPUs_Utilized"
domain = "Metric"
maxval = 300
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Summary'])
sibling = None
def compute(self, EV):
try:
self.val = CPUs_Utilized(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CPUs_Utilized zero division")
desc = """
Average number of utilized CPUs"""
class Metric_Core_Frequency:
name = "Core_Frequency"
domain = "SystemMetric"
maxval = 0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Summary', 'Power'])
sibling = None
def compute(self, EV):
try:
self.val = Core_Frequency(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Core_Frequency zero division")
desc = """
Measured Average Core Frequency for unhalted processors
[GHz]"""
class Metric_GFLOPs:
name = "GFLOPs"
domain = "Metric"
maxval = 200
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
sibling = None
def compute(self, EV):
try:
self.val = GFLOPs(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "GFLOPs zero division")
desc = """
Giga Floating Point Operations Per Second. Aggregate across
all supported options of: FP precisions, scalar and vector
instructions, vector-width"""
class Metric_Turbo_Utilization:
name = "Turbo_Utilization"
domain = "Core_Metric"
maxval = 10.0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['Power'])
sibling = None
def compute(self, EV):
try:
self.val = Turbo_Utilization(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Turbo_Utilization zero division")
desc = """
Average Frequency Utilization relative nominal frequency"""
class Metric_SMT_2T_Utilization:
name = "SMT_2T_Utilization"
domain = "Core_Metric"
maxval = 1.0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['SMT'])
sibling = None
def compute(self, EV):
try:
self.val = SMT_2T_Utilization(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "SMT_2T_Utilization zero division")
desc = """
Fraction of cycles where both hardware Logical Processors
were active"""
class Metric_Kernel_Utilization:
name = "Kernel_Utilization"
domain = "Metric"
maxval = 1.0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['OS'])
sibling = None
def compute(self, EV):
try:
self.val = Kernel_Utilization(self, EV, 0)
self.thresh = (self.val > 0.05)
except ZeroDivisionError:
handle_error_metric(self, "Kernel_Utilization zero division")
desc = """
Fraction of cycles spent in the Operating System (OS) Kernel
mode"""
class Metric_Kernel_CPI:
name = "Kernel_CPI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.System"
metricgroup = frozenset(['OS'])
sibling = None
def compute(self, EV):
try:
self.val = Kernel_CPI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Kernel_CPI zero division")
desc = """
Cycles Per Instruction for the Operating System (OS) Kernel
mode"""
class Metric_DRAM_BW_Use:
    """Info.System metric: external memory bandwidth use in GB/sec."""
    name = "DRAM_BW_Use"
    domain = "GB/sec"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC'])
    sibling = None
    desc = """
Average external Memory Bandwidth Use for reads and writes
[GB / sec]"""

    def compute(self, EV):
        try:
            self.val = DRAM_BW_Use(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "DRAM_BW_Use zero division")
        else:
            self.thresh = True
class Metric_Power:
    """Info.System metric: total package power in Watts."""
    name = "Power"
    domain = "SystemMetric"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Power', 'SoC'])
    sibling = None
    desc = """
Total package Power in Watts"""

    def compute(self, EV):
        try:
            self.val = Power(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Power zero division")
        else:
            self.thresh = True
class Metric_Time:
    """Info.System metric: run duration in seconds."""
    name = "Time"
    domain = "Seconds"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary'])
    sibling = None
    desc = """
Run duration time in seconds"""

    def compute(self, EV):
        try:
            self.val = Time(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Time zero division")
        else:
            # Sub-second runs give unreliable numbers; flag them.
            self.thresh = (self.val < 1)
class Metric_Socket_CLKS:
    """Info.System metric: socket clocks while any core is active."""
    name = "Socket_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['SoC'])
    sibling = None
    desc = """
Socket actual clocks when any core is active on that socket"""

    def compute(self, EV):
        try:
            self.val = Socket_CLKS(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "Socket_CLKS zero division")
        else:
            self.thresh = True
class Metric_IpFarBranch:
    """Info.System metric: instructions retired per far branch."""
    name = "IpFarBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Branches', 'OS'])
    sibling = None
    desc = """
Instructions per Far Branch ( Far Branches apply upon
transition from application to operating system, handling
interrupts, exceptions) [lower number means higher
occurrence rate]"""

    def compute(self, EV):
        try:
            self.val = IpFarBranch(self, EV, 0)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFarBranch zero division")
        else:
            # A low instructions-per-far-branch rate is the interesting case.
            self.thresh = (self.val < 1000000)
# Schedule
class Setup:
def __init__(self, r):
o = dict()
n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n
n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n
n = ICache_Misses() ; r.run(n) ; o["ICache_Misses"] = n
n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n
n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n
n = Mispredicts_Resteers() ; r.run(n) ; o["Mispredicts_Resteers"] = n
n = Clears_Resteers() ; r.run(n) ; o["Clears_Resteers"] = n
n = Unknown_Branches() ; r.run(n) ; o["Unknown_Branches"] = n
n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n
n = LCP() ; r.run(n) ; o["LCP"] = n
n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n
n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n
n = MITE() ; r.run(n) ; o["MITE"] = n
n = DSB() ; r.run(n) ; o["DSB"] = n
n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n
n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n
n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n
n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n
n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n
n = L1_Bound() ; r.run(n) ; o["L1_Bound"] = n
n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n
n = Store_Fwd_Blk() ; r.run(n) ; o["Store_Fwd_Blk"] = n
n = Lock_Latency() ; r.run(n) ; o["Lock_Latency"] = n
n = Split_Loads() ; r.run(n) ; o["Split_Loads"] = n
n = G4K_Aliasing() ; r.run(n) ; o["G4K_Aliasing"] = n
n = FB_Full() ; r.run(n) ; o["FB_Full"] = n
n = L2_Bound() ; r.run(n) ; o["L2_Bound"] = n
n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n
n = Contested_Accesses() ; r.run(n) ; o["Contested_Accesses"] = n
n = Data_Sharing() ; r.run(n) ; o["Data_Sharing"] = n
n = L3_Hit_Latency() ; r.run(n) ; o["L3_Hit_Latency"] = n
n = SQ_Full() ; r.run(n) ; o["SQ_Full"] = n
n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n
n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n
n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n
n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n
n = Store_Latency() ; r.run(n) ; o["Store_Latency"] = n
n = False_Sharing() ; r.run(n) ; o["False_Sharing"] = n
n = Split_Stores() ; r.run(n) ; o["Split_Stores"] = n
n = DTLB_Store() ; r.run(n) ; o["DTLB_Store"] = n
n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n
n = Divider() ; r.run(n) ; o["Divider"] = n
n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n
n = Ports_Utilized_0() ; r.run(n) ; o["Ports_Utilized_0"] = n
n = Ports_Utilized_1() ; r.run(n) ; o["Ports_Utilized_1"] = n
n = Ports_Utilized_2() ; r.run(n) ; o["Ports_Utilized_2"] = n
n = Ports_Utilized_3m() ; r.run(n) ; o["Ports_Utilized_3m"] = n
n = ALU_Op_Utilization() ; r.run(n) ; o["ALU_Op_Utilization"] = n
n = Port_0() ; r.run(n) ; o["Port_0"] = n
n = Port_1() ; r.run(n) ; o["Port_1"] = n
n = Port_5() ; r.run(n) ; o["Port_5"] = n
n = Port_6() ; r.run(n) ; o["Port_6"] = n
n = Load_Op_Utilization() ; r.run(n) ; o["Load_Op_Utilization"] = n
n = Port_2() ; r.run(n) ; o["Port_2"] = n
n = Port_3() ; r.run(n) ; o["Port_3"] = n
n = Store_Op_Utilization() ; r.run(n) ; o["Store_Op_Utilization"] = n
n = Port_4() ; r.run(n) ; o["Port_4"] = n
n = Port_7() ; r.run(n) ; o["Port_7"] = n
n = Retiring() ; r.run(n) ; o["Retiring"] = n
n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n
n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n
n = X87_Use() ; r.run(n) ; o["X87_Use"] = n
n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n
n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n
n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n
n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n
n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n
n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n
n = Assists() ; r.run(n) ; o["Assists"] = n
n = CISC() ; r.run(n) ; o["CISC"] = n
# parents
o["Fetch_Latency"].parent = o["Frontend_Bound"]
o["ICache_Misses"].parent = o["Fetch_Latency"]
o["ITLB_Misses"].parent = o["Fetch_Latency"]
o["Branch_Resteers"].parent = o["Fetch_Latency"]
o["Mispredicts_Resteers"].parent = o["Branch_Resteers"]
o["Clears_Resteers"].parent = o["Branch_Resteers"]
o["Unknown_Branches"].parent = o["Branch_Resteers"]
o["MS_Switches"].parent = o["Fetch_Latency"]
o["LCP"].parent = o["Fetch_Latency"]
o["DSB_Switches"].parent = o["Fetch_Latency"]
o["Fetch_Bandwidth"].parent = o["Frontend_Bound"]
o["MITE"].parent = o["Fetch_Bandwidth"]
o["DSB"].parent = o["Fetch_Bandwidth"]
o["Branch_Mispredicts"].parent = o["Bad_Speculation"]
o["Machine_Clears"].parent = o["Bad_Speculation"]
o["Memory_Bound"].parent = o["Backend_Bound"]
o["L1_Bound"].parent = o["Memory_Bound"]
o["DTLB_Load"].parent = o["L1_Bound"]
o["Store_Fwd_Blk"].parent = o["L1_Bound"]
o["Lock_Latency"].parent = o["L1_Bound"]
o["Split_Loads"].parent = o["L1_Bound"]
o["G4K_Aliasing"].parent = o["L1_Bound"]
o["FB_Full"].parent = o["L1_Bound"]
o["L2_Bound"].parent = o["Memory_Bound"]
o["L3_Bound"].parent = o["Memory_Bound"]
o["Contested_Accesses"].parent = o["L3_Bound"]
o["Data_Sharing"].parent = o["L3_Bound"]
o["L3_Hit_Latency"].parent = o["L3_Bound"]
o["SQ_Full"].parent = o["L3_Bound"]
o["DRAM_Bound"].parent = o["Memory_Bound"]
o["MEM_Bandwidth"].parent = o["DRAM_Bound"]
o["MEM_Latency"].parent = o["DRAM_Bound"]
o["Store_Bound"].parent = o["Memory_Bound"]
o["Store_Latency"].parent = o["Store_Bound"]
o["False_Sharing"].parent = o["Store_Bound"]
o["Split_Stores"].parent = o["Store_Bound"]
o["DTLB_Store"].parent = o["Store_Bound"]
o["Core_Bound"].parent = o["Backend_Bound"]
o["Divider"].parent = o["Core_Bound"]
o["Ports_Utilization"].parent = o["Core_Bound"]
o["Ports_Utilized_0"].parent = o["Ports_Utilization"]
o["Ports_Utilized_1"].parent = o["Ports_Utilization"]
o["Ports_Utilized_2"].parent = o["Ports_Utilization"]
o["Ports_Utilized_3m"].parent = o["Ports_Utilization"]
o["ALU_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Port_0"].parent = o["ALU_Op_Utilization"]
o["Port_1"].parent = o["ALU_Op_Utilization"]
o["Port_5"].parent = o["ALU_Op_Utilization"]
o["Port_6"].parent = o["ALU_Op_Utilization"]
o["Load_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Port_2"].parent = o["Load_Op_Utilization"]
o["Port_3"].parent = o["Load_Op_Utilization"]
o["Store_Op_Utilization"].parent = o["Ports_Utilized_3m"]
o["Port_4"].parent = o["Store_Op_Utilization"]
o["Port_7"].parent = o["Store_Op_Utilization"]
o["Light_Operations"].parent = o["Retiring"]
o["FP_Arith"].parent = o["Light_Operations"]
o["X87_Use"].parent = o["FP_Arith"]
o["FP_Scalar"].parent = o["FP_Arith"]
o["FP_Vector"].parent = o["FP_Arith"]
o["FP_Vector_128b"].parent = o["FP_Vector"]
o["FP_Vector_256b"].parent = o["FP_Vector"]
o["Heavy_Operations"].parent = o["Retiring"]
o["Microcode_Sequencer"].parent = o["Heavy_Operations"]
o["Assists"].parent = o["Microcode_Sequencer"]
o["CISC"].parent = o["Microcode_Sequencer"]
# user visible metrics
n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n
n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n
n = Metric_UpTB() ; r.metric(n) ; o["UpTB"] = n
n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n
n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n
n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n
n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n
n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n
n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n
n = Metric_FP_Arith_Utilization() ; r.metric(n) ; o["FP_Arith_Utilization"] = n
n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n
n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n
n = Metric_IpLoad() ; r.metric(n) ; o["IpLoad"] = n
n = Metric_IpStore() ; r.metric(n) ; o["IpStore"] = n
n = Metric_IpBranch() ; r.metric(n) ; o["IpBranch"] = n
n = Metric_IpCall() ; r.metric(n) ; o["IpCall"] = n
n = Metric_IpTB() ; r.metric(n) ; o["IpTB"] = n
n = Metric_BpTkBranch() ; r.metric(n) ; o["BpTkBranch"] = n
n = Metric_IpFLOP() ; r.metric(n) ; o["IpFLOP"] = n
n = Metric_IpArith() ; r.metric(n) ; o["IpArith"] = n
n = Metric_IpArith_Scalar_SP() ; r.metric(n) ; o["IpArith_Scalar_SP"] = n
n = Metric_IpArith_Scalar_DP() ; r.metric(n) ; o["IpArith_Scalar_DP"] = n
n = Metric_IpArith_AVX128() ; r.metric(n) ; o["IpArith_AVX128"] = n
n = Metric_IpArith_AVX256() ; r.metric(n) ; o["IpArith_AVX256"] = n
n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n
n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n
n = Metric_Execute() ; r.metric(n) ; o["Execute"] = n
n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n
n = Metric_IpUnknown_Branch() ; r.metric(n) ; o["IpUnknown_Branch"] = n
n = Metric_IpMispredict() ; r.metric(n) ; o["IpMispredict"] = n
n = Metric_IpMisp_Indirect() ; r.metric(n) ; o["IpMisp_Indirect"] = n
n = Metric_Load_Miss_Real_Latency() ; r.metric(n) ; o["Load_Miss_Real_Latency"] = n
n = Metric_MLP() ; r.metric(n) ; o["MLP"] = n
n = Metric_L1MPKI() ; r.metric(n) ; o["L1MPKI"] = n
n = Metric_L2MPKI() ; r.metric(n) ; o["L2MPKI"] = n
n = Metric_L2MPKI_All() ; r.metric(n) ; o["L2MPKI_All"] = n
n = Metric_L2MPKI_Load() ; r.metric(n) ; o["L2MPKI_Load"] = n
n = Metric_L2MPKI_RFO() ; r.metric(n) ; o["L2MPKI_RFO"] = n
n = Metric_L2HPKI_All() ; r.metric(n) ; o["L2HPKI_All"] = n
n = Metric_L2HPKI_Load() ; r.metric(n) ; o["L2HPKI_Load"] = n
n = Metric_L3MPKI() ; r.metric(n) ; o["L3MPKI"] = n
n = Metric_L1D_Cache_Fill_BW() ; r.metric(n) ; o["L1D_Cache_Fill_BW"] = n
n = Metric_L2_Cache_Fill_BW() ; r.metric(n) ; o["L2_Cache_Fill_BW"] = n
n = Metric_L3_Cache_Fill_BW() ; r.metric(n) ; o["L3_Cache_Fill_BW"] = n
n = Metric_Page_Walks_Utilization() ; r.metric(n) ; o["Page_Walks_Utilization"] = n
n = Metric_L1D_Cache_Fill_BW_2T() ; r.metric(n) ; o["L1D_Cache_Fill_BW_2T"] = n
n = Metric_L2_Cache_Fill_BW_2T() ; r.metric(n) ; o["L2_Cache_Fill_BW_2T"] = n
n = Metric_L3_Cache_Fill_BW_2T() ; r.metric(n) ; o["L3_Cache_Fill_BW_2T"] = n
n = Metric_Load_L2_Miss_Latency() ; r.metric(n) ; o["Load_L2_Miss_Latency"] = n
n = Metric_Load_L2_MLP() ; r.metric(n) ; o["Load_L2_MLP"] = n
n = Metric_Data_L2_MLP() ; r.metric(n) ; o["Data_L2_MLP"] = n
n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n
n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n
n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n
n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n
n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n
n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n
n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n
n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n
n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n
n = Metric_Power() ; r.metric(n) ; o["Power"] = n
n = Metric_Time() ; r.metric(n) ; o["Time"] = n
n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n
n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n
# references between groups
o["Mispredicts_Resteers"].Branch_Resteers = o["Branch_Resteers"]
o["Clears_Resteers"].Branch_Resteers = o["Branch_Resteers"]
o["Unknown_Branches"].Clears_Resteers = o["Clears_Resteers"]
o["Unknown_Branches"].Branch_Resteers = o["Branch_Resteers"]
o["Unknown_Branches"].Mispredicts_Resteers = o["Mispredicts_Resteers"]
o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"]
o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"]
o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"]
o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"]
o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"]
o["Backend_Bound"].Retiring = o["Retiring"]
o["Backend_Bound"].Bad_Speculation = o["Bad_Speculation"]
o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"]
o["Memory_Bound"].Retiring = o["Retiring"]
o["Memory_Bound"].Bad_Speculation = o["Bad_Speculation"]
o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"]
o["Memory_Bound"].Backend_Bound = o["Backend_Bound"]
o["Memory_Bound"].Fetch_Latency = o["Fetch_Latency"]
o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"]
o["Core_Bound"].Retiring = o["Retiring"]
o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"]
o["Core_Bound"].Memory_Bound = o["Memory_Bound"]
o["Core_Bound"].Backend_Bound = o["Backend_Bound"]
o["Core_Bound"].Bad_Speculation = o["Bad_Speculation"]
o["Core_Bound"].Fetch_Latency = o["Fetch_Latency"]
o["Ports_Utilization"].Fetch_Latency = o["Fetch_Latency"]
o["Ports_Utilized_0"].Fetch_Latency = o["Fetch_Latency"]
o["Retiring"].Heavy_Operations = o["Heavy_Operations"]
o["Light_Operations"].Retiring = o["Retiring"]
o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"]
o["Light_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["FP_Arith"].FP_Scalar = o["FP_Scalar"]
o["FP_Arith"].X87_Use = o["X87_Use"]
o["FP_Arith"].FP_Vector = o["FP_Vector"]
o["Heavy_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["CISC"].Microcode_Sequencer = o["Microcode_Sequencer"]
o["CISC"].Assists = o["Assists"]
# siblings cross-tree
o["Mispredicts_Resteers"].sibling = (o["Branch_Mispredicts"],)
o["Clears_Resteers"].sibling = (o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],)
o["MS_Switches"].sibling = (o["Clears_Resteers"], o["Machine_Clears"], o["L1_Bound"], o["Microcode_Sequencer"],)
o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],)
o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],)
o["Branch_Mispredicts"].sibling = (o["Mispredicts_Resteers"],)
o["Machine_Clears"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["L1_Bound"], o["Contested_Accesses"], o["Data_Sharing"], o["False_Sharing"], o["Microcode_Sequencer"],)
o["L1_Bound"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["Ports_Utilized_1"], o["Microcode_Sequencer"],)
o["DTLB_Load"].sibling = (o["DTLB_Store"],)
o["Lock_Latency"].sibling = (o["Store_Latency"],)
o["FB_Full"].sibling = (o["SQ_Full"], o["MEM_Bandwidth"], o["Store_Latency"],)
o["Contested_Accesses"].sibling = (o["Machine_Clears"], o["Data_Sharing"], o["False_Sharing"],)
o["Data_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["False_Sharing"],)
o["L3_Hit_Latency"].sibling = (o["MEM_Latency"],)
o["L3_Hit_Latency"].overlap = True
o["SQ_Full"].sibling = (o["FB_Full"], o["MEM_Bandwidth"],)
o["MEM_Bandwidth"].sibling = (o["FB_Full"], o["SQ_Full"],)
o["MEM_Latency"].sibling = (o["L3_Hit_Latency"],)
o["Store_Latency"].sibling = (o["Lock_Latency"], o["FB_Full"],)
o["Store_Latency"].overlap = True
o["False_Sharing"].sibling = (o["Machine_Clears"], o["Contested_Accesses"], o["Data_Sharing"],)
o["Split_Stores"].sibling = (o["Port_4"],)
o["DTLB_Store"].sibling = (o["DTLB_Load"],)
o["Ports_Utilized_1"].sibling = (o["L1_Bound"],)
o["Ports_Utilized_2"].sibling = (o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["Port_0"].sibling = (o["Ports_Utilized_2"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["Port_1"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["Port_5"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["Port_6"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["Port_4"].sibling = (o["Split_Stores"],)
o["FP_Scalar"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["FP_Vector"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
o["FP_Vector_128b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"],)
o["FP_Vector_256b"].sibling = (o["Ports_Utilized_2"], o["Port_0"], o["Port_1"], o["Port_5"], o["Port_6"], o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"],)
o["Microcode_Sequencer"].sibling = (o["Clears_Resteers"], o["MS_Switches"], o["Machine_Clears"], o["L1_Bound"],)
o["IpTB"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
o["DRAM_BW_Use"].sibling = (o["FB_Full"], o["SQ_Full"], o["MEM_Bandwidth"],)
| 142,042 | Python | .py | 3,523 | 34.632132 | 323 | 0.656478 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,894 | tl-barplot.py | andikleen_pmu-tools/tl-barplot.py | #!/usr/bin/env python3
# plot toplev -I... -x, -o ...csv output as bar plot
#
from __future__ import print_function
import os
import re
import argparse
from math import isnan, trunc
from collections import defaultdict
import matplotlib
if os.getenv('DISPLAY') is None:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import gen_level
import tldata
def parse_args():
    """Build the command line parser for the bar-plot tool and parse sys.argv."""
    parser = argparse.ArgumentParser(usage='plot toplev -I... -x, output as bar plot')
    parser.add_argument('file', help='CSV file to plot')
    parser.add_argument('--output', '-o', help='Save figure to file (.pdf/.png/etc). Otherwise show.',
                        nargs='?')
    parser.add_argument('--verbose', '-v', help='Plot all data values even if below threshold', action='store_true')
    parser.add_argument('--xkcd', help='Enable XKCD mode (with new matplotlib). Please install Humor Sans.', action='store_true')
    parser.add_argument('--title', help='Set title of plot', nargs='?')
    parser.add_argument('--quiet', help='Be quiet', action='store_true')
    parser.add_argument('--cpu', help='CPU to plot (by default first)') # XXX
    return parser.parse_args()
# Script body: parse arguments, load the toplev CSV, then pick a CPU to plot.
args = parse_args()
try:
    import brewer2mpl
except ImportError:
    # brewer2mpl only improves the color palette; it is optional.
    if not args.quiet:
        print("pip install brewer2mpl for better colors")
if args.xkcd:
    plt.xkcd()
# Load and index the toplev CSV (per-level node lists, timestamps, values).
data = tldata.TLData(args.file, args.verbose)
data.update()
levels = data.levels
timestamps = data.times
ratios = defaultdict(list)
# XXX plot multiple cpus instead
cpu = None
if args.cpu:
    cpu = args.cpu
elif 'CLKS' in data.headers and len(data.vals) > 0:
    # pick CPU with highest utilization. XXX look at all time series
    util = sorted([(data.vals[0][x], x[1]) for x in data.vals[0].keys() if x[0] == 'CLKS'],
                  reverse=True)
    cpu = util[0][1]
elif len(data.cpus) > 0:
    # NOTE(review): picks the longest CPU name; presumably prefers
    # thread-qualified names (e.g. C0-T0) over bare cores -- confirm.
    cpu = sorted(sorted(data.cpus), key=len, reverse=True)[0]
def cpumatch(x, cpu, base):
    """True when column qualifier x belongs to cpu (prefix) or equals its core base."""
    if x.startswith(cpu):
        return True
    return x == base
# Resolve the chosen CPU to every matching column qualifier (e.g. C3 and C3-T0).
if cpu:
    base = None
    m = re.match(r'C\d+', cpu)
    if m:
        base = m.group(0)
    aliases = [x for x in data.cpus if cpumatch(x, cpu, base)]
    print("plotting cpus:", " ".join(sorted(aliases)))
else:
    aliases = []
if len(aliases) == 0:
    # Aggregated / single-CPU data: values are keyed with cpu == None.
    aliases = [None]
# Build one time series per header, taking the first alias with a value,
# NaN where no alias has data for that sample.
for h in data.headers:
    def findval(d):
        for c in aliases:
            if (h, c) in d:
                return d[(h, c)]
        return float('nan')
    ratios[h] = list(map(findval, data.vals))
def valid_row(r):
    """A series is plottable when its sum is nonzero and not NaN."""
    total = sum(r)
    return total != 0.0 and not isnan(total)
def get_colors(non_null):
    """Return a Brewer 'Spectral' palette sized for the series count, or None
    when brewer2mpl was not importable (matplotlib defaults are used then)."""
    if 'brewer2mpl' not in globals():
        return None
    # Spectral/Diverging maps exist for 3..11 colors; clamp to that range.
    num_color = max(min(len(non_null), 11), 3)
    return brewer2mpl.get_map('Spectral', 'Diverging', num_color).hex_colors
def set_title(ax, t):
    """Attach a small right-aligned title to a subplot, tolerating old matplotlib."""
    try:
        ax.set_title(t, {'fontsize': 6}, loc='right')
    except AttributeError:
        # Older matplotlib versions reject the extended signature.
        ax.set_title(t)
def suffix(x):
    """Return the text after the last '.' in x, or x itself when there is no dot."""
    return x.rsplit('.', 1)[-1]
# Main plotting pass: one subplot row per toplev level, all sharing the x axis.
n = 0
numplots = len(levels.keys())
ax = None
yset = False
max_legend = 0
xaxis = None
legend_bbox = (0., 0., -0.07, -0.03)
legend_loc = 2
for l in tldata.level_order(data):
    # Drop series that are all-zero or NaN-summed; skip empty levels entirely.
    non_null = [x for x in levels[l] if valid_row(ratios[x])]
    if not non_null:
        n += 1
        continue
    all_colors = get_colors(non_null)
    ax = plt.subplot2grid((numplots, 1), (n, 0), sharex=xaxis)
    plt.tight_layout()
    set_title(ax, l)
    # Replace NaN samples with 0 so stackplot/plot do not break.
    r = [[y if not isnan(y) else 0.0 for y in ratios[x]] for x in non_null]
    if gen_level.is_metric(non_null[0]):
        # Metric rows: plain line plots with a data-driven y scale.
        for j, name in zip(r, non_null):
            stack = ax.plot(timestamps, j, label=name)
        leg = plt.legend(ncol=6,
                         loc=legend_loc,
                         bbox_to_anchor=legend_bbox,
                         prop={'size':6})
        low = min([min(ratios[x]) for x in non_null])
        high = max([max(ratios[x]) for x in non_null])
        if not isnan(low) and not isnan(high):
            ax.yaxis.set_ticks([low, trunc(((high - low) / 2.0) / 100.) * 100., high])
    else:
        # Percentage rows: stacked area plot with a fixed 0..100 axis.
        stack = ax.stackplot(timestamps, *r, colors=all_colors)
        ax.set_ylim(0, 100)
        ax.yaxis.set_ticks([0., 50., 100.])
        # stackplot returns PolyCollections; build legend proxies from them.
        p = [plt.Rectangle((0, 0), 1, 1, fc=pc.get_facecolor()[0]) for pc in stack]
        leg = plt.legend(p, list(map(suffix, non_null)),
                         ncol=6,
                         bbox_to_anchor=legend_bbox,
                         loc=legend_loc,
                         prop={'size':6})
    leg.get_frame().set_alpha(0.5)
    for j in ax.get_xticklabels() + ax.get_yticklabels():
        j.set_fontsize(6)
    if not xaxis:
        xaxis = ax
    #if n >= 2 and not yset and l != -1:
    # ax.set_ylabel('(% of execution time)')
    # yset = True
    if n != numplots:
        max_legend = max(len(non_null), max_legend)
    #ax.margins(0, 0)
    n += 1
if len(timestamps) == 1:
    # A single sample has no meaningful time axis.
    plt.gca().axes.get_xaxis().set_visible(False)
plt.subplots_adjust(hspace=1.5 if max_legend > 6 else 0.9, bottom=0.20,
                    top=0.95)
if args.title:
    #plt.subplot(numplots, 1, 1)
    plt.title(args.title)
if args.output:
    plt.savefig(args.output)
else:
    plt.show()
| 5,231 | Python | .py | 154 | 28.266234 | 124 | 0.604826 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,895 | jkt_server_ratios.py | andikleen_pmu-tools/jkt_server_ratios.py | # -*- coding: latin-1 -*-
#
# auto generated TopDown/TMA 4.8-full-perf description for Intel Xeon E5 (code named SandyBridge EP)
# Please see http://ark.intel.com for more details on these CPUs.
#
# References:
# http://bit.ly/tma-ispass14
# http://halobates.de/blog/p/262
# https://sites.google.com/site/analysismethods/yasin-pubs
# https://download.01.org/perfmon/
# https://github.com/andikleen/pmu-tools/wiki/toplev-manual
#
# Helpers
# Module-level hooks and topology state. NOTE(review): these defaults are
# presumably overwritten at runtime by the toplev driver -- confirm there.
print_error = lambda msg: False  # error reporting hook; default discards messages
smt_enabled = False              # True when Hyper-Threading is on
ebs_mode = False                 # event-based-sampling mode flag
version = "4.8-full-perf"        # TMA model version this file was generated from
base_frequency = -1.0            # nominal CPU frequency; -1 = unknown
Memory = 0
Average_Frequency = 0.0
num_cores = 1                    # topology counts; defaults assume one CPU
num_threads = 1
num_sockets = 1
def handle_error(obj, msg):
    """Report a failed node computation and reset it to a safe state."""
    print_error(msg)
    obj.errcount += 1
    # Zero the value and clear the threshold so the node is not reported.
    obj.val = 0
    obj.thresh = False
def handle_error_metric(obj, msg):
    """Like handle_error, but metrics carry no threshold to clear."""
    print_error(msg)
    obj.errcount += 1
    obj.val = 0
# Constants
Exe_Ports = 6            # number of execution ports on this microarchitecture
Mem_L3_Weight = 7        # relative cost weight of an L3 miss vs. an L3 hit
Mem_STLB_Hit_Cost = 7    # cycles charged for an STLB hit
BAClear_Cost = 12        # cycles charged per BAClear (front-end resteer)
MS_Switches_Cost = 3     # cycles charged per microcode sequencer switch
Pipeline_Width = 4       # issue/rename width (slots per cycle)
OneMillion = 1000000
OneBillion = 1000000000
EBS_Mode = 0
DS = 1
# Aux. formulas
def Backend_Bound_Cycles(self, EV, level):
    """Cycles attributed to the backend: total stalls plus dispatch cycles,
    minus cycles explained by execution width / empty RS, plus SB stalls."""
    stalls = STALLS_TOTAL(self, EV, level)
    dispatched = EV("UOPS_DISPATCHED.THREAD:c1", level)
    few_uops = Few_Uops_Executed_Threshold(self, EV, level)
    rs_empty = Frontend_RS_Empty_Cycles(self, EV, level)
    sb_stalls = EV("RESOURCE_STALLS.SB", level)
    return stalls + dispatched - few_uops - rs_empty + sb_stalls
def DurationTimeInSeconds(self, EV, level):
    """Measurement interval length in seconds (interval-ms is milliseconds)."""
    interval_ms = EV("interval-ms", 0)
    return interval_ms / 1000
def Execute_Cycles(self, EV, level):
    """Core cycles with at least one uop executed; halved when SMT shares the core."""
    if smt_enabled:
        return EV("UOPS_DISPATCHED.CORE:c1", level) / 2
    return EV("UOPS_DISPATCHED.CORE:c1", level)
def Fetched_Uops(self, EV, level):
    """Total uops delivered by all front-end paths (DSB, LSD, MITE, MS)."""
    total = EV("IDQ.DSB_UOPS", level)
    total += EV("LSD.UOPS", level)
    total += EV("IDQ.MITE_UOPS", level)
    total += EV("IDQ.MS_UOPS", level)
    return total
def Few_Uops_Executed_Threshold(self, EV, level):
    """Cycles with >=3 (high-IPC run) or >=2 uops executed.

    Both cmask flavors are touched up front so the event scheduler always
    collects them, regardless of which branch is taken later."""
    EV("UOPS_DISPATCHED.THREAD:c3", level)
    EV("UOPS_DISPATCHED.THREAD:c2", level)
    if IPC(self, EV, level) > 1.8:
        return EV("UOPS_DISPATCHED.THREAD:c3", level)
    return EV("UOPS_DISPATCHED.THREAD:c2", level)
def FLOP_Count(self, EV, level):
    """Floating point operations count, weighting each event by its FLOPs/uop."""
    scalar = EV("FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE", level) + EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", level)
    packed_dp = EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", level)
    four_wide = EV("FP_COMP_OPS_EXE.SSE_PACKED_SINGLE", level) + EV("SIMD_FP_256.PACKED_DOUBLE", level)
    eight_wide = EV("SIMD_FP_256.PACKED_SINGLE", level)
    return 1 * scalar + 2 * packed_dp + 4 * four_wide + 8 * eight_wide
def FP_Arith_Scalar(self, EV, level):
    """Sum of scalar SSE floating point operation events."""
    single = EV("FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE", level)
    double = EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", level)
    return single + double
def FP_Arith_Vector(self, EV, level):
    """Sum of packed (vector) SSE/AVX floating point operation events."""
    total = EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", level)
    total += EV("FP_COMP_OPS_EXE.SSE_PACKED_SINGLE", level)
    total += EV("SIMD_FP_256.PACKED_SINGLE", level)
    total += EV("SIMD_FP_256.PACKED_DOUBLE", level)
    return total
def Frontend_RS_Empty_Cycles(self, EV, level):
    """RS-empty cycles, counted only when Fetch_Latency is significant (>10%)."""
    # Touch the event unconditionally so it is always scheduled for collection.
    EV("RS_EVENTS.EMPTY_CYCLES", level)
    if self.Fetch_Latency.compute(EV) > 0.1:
        return EV("RS_EVENTS.EMPTY_CYCLES", level)
    return 0
def Frontend_Latency_Cycles(self, EV, level):
    """Cycles the front end delivered zero uops, clamped to thread clocks."""
    def clamped(EV, level):
        return min(EV("CPU_CLK_UNHALTED.THREAD", level),
                   EV("IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", level))
    # EV receives the callable so derived expressions stay schedulable.
    return EV(clamped, level)
def HighIPC(self, EV, level):
    """IPC expressed as a fraction of the machine's issue width."""
    return IPC(self, EV, level) / Pipeline_Width
def ITLB_Miss_Cycles(self, EV, level):
    """Cycles lost to ITLB misses: 12 cycles per STLB hit plus walk duration."""
    stlb_hits = EV("ITLB_MISSES.STLB_HIT", level)
    walk_cycles = EV("ITLB_MISSES.WALK_DURATION", level)
    return 12 * stlb_hits + walk_cycles
def Mem_L3_Hit_Fraction(self, EV, level):
    """Fraction of L3-reaching loads that hit, with misses cost-weighted."""
    hits = EV("MEM_LOAD_UOPS_RETIRED.LLC_HIT", level)
    misses = EV("MEM_LOAD_UOPS_RETIRED.LLC_MISS", level)
    return hits / (hits + Mem_L3_Weight * misses)
def Memory_Bound_Fraction(self, EV, level):
    """Share of backend-bound cycles explained by memory (load + store buffer) stalls."""
    mem_stalls = STALLS_MEM_ANY(self, EV, level) + EV("RESOURCE_STALLS.SB", level)
    return mem_stalls / Backend_Bound_Cycles(self, EV, level)
def Mispred_Clears_Fraction(self, EV, level):
    """Share of pipeline flushes caused by branch mispredicts (vs. machine clears)."""
    mispredicts = EV("BR_MISP_RETIRED.ALL_BRANCHES", level)
    clears = EV("MACHINE_CLEARS.COUNT", level)
    return mispredicts / (mispredicts + clears)
def ORO_DRD_Any_Cycles(self, EV, level):
    """Cycles with any outstanding demand data read, clamped to thread clocks."""
    def clamped(EV, level):
        return min(EV("CPU_CLK_UNHALTED.THREAD", level),
                   EV("OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", level))
    return EV(clamped, level)
def ORO_DRD_BW_Cycles(self, EV, level):
    """Cycles with 6+ outstanding data reads (bandwidth-limited), clamped to clocks."""
    def clamped(EV, level):
        return min(EV("CPU_CLK_UNHALTED.THREAD", level),
                   EV("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD:c6", level))
    return EV(clamped, level)
def STALLS_MEM_ANY(self, EV, level):
    """Execution stall cycles with an L1D miss pending, clamped to thread clocks."""
    def clamped(EV, level):
        return min(EV("CPU_CLK_UNHALTED.THREAD", level),
                   EV("CYCLE_ACTIVITY.STALLS_L1D_PENDING", level))
    return EV(clamped, level)
def STALLS_TOTAL(self, EV, level):
    """Cycles with no uop dispatched at all, clamped to thread clocks."""
    def clamped(EV, level):
        return min(EV("CPU_CLK_UNHALTED.THREAD", level),
                   EV("CYCLE_ACTIVITY.CYCLES_NO_DISPATCH", level))
    return EV(clamped, level)
def Recovery_Cycles(self, EV, level):
    """Pipeline recovery cycles; the per-core ANY flavor is halved under SMT."""
    if smt_enabled:
        return EV("INT_MISC.RECOVERY_CYCLES_ANY", level) / 2
    return EV("INT_MISC.RECOVERY_CYCLES", level)
def Retire_Fraction(self, EV, level):
    """Fraction of issued uops that eventually retire."""
    retired = Retired_Slots(self, EV, level)
    return retired / EV("UOPS_ISSUED.ANY", level)
def Retired_Slots(self, EV, level):
    """Retired uop slots per Logical Processor."""
    return EV("UOPS_RETIRED.RETIRE_SLOTS", level)
def Num_CPUs(self, EV, level):
    """Number of logical processors on the target system (4 cores, x2 with SMT)."""
    if smt_enabled:
        return 8
    return 4
def IPC(self, EV, level):
    """Instructions Per Cycle (per Logical Processor)."""
    instructions = EV("INST_RETIRED.ANY", level)
    return instructions / CLKS(self, EV, level)
def UopPI(self, EV, level):
    """Uops Per Instruction; flagged when above 1.05."""
    slots = Retired_Slots(self, EV, level)
    val = slots / EV("INST_RETIRED.ANY", level)
    self.thresh = (val > 1.05)
    return val
def CPI(self, EV, level):
    """Cycles Per Instruction (per Logical Processor) -- the inverse of IPC."""
    ipc = IPC(self, EV, level)
    return 1 / ipc
def CLKS(self, EV, level):
    """Per-Logical Processor unhalted clock cycles."""
    return EV("CPU_CLK_UNHALTED.THREAD", level)
def SLOTS(self, EV, level):
    """Total issue-pipeline slots: pipeline width times core clocks."""
    core_clks = CORE_CLKS(self, EV, level)
    return Pipeline_Width * core_clks
def Execute_per_Issue(self, EV, level):
    """Executed-to-issued uops ratio; >1 suggests micro-fusion, <1 rename-time execution."""
    executed = EV("UOPS_DISPATCHED.THREAD", level)
    issued = EV("UOPS_ISSUED.ANY", level)
    return executed / issued
def CoreIPC(self, EV, level):
    """Instructions Per Cycle per physical core (across hyper-threads)."""
    instructions = EV("INST_RETIRED.ANY", level)
    return instructions / CORE_CLKS(self, EV, level)
def FLOPc(self, EV, level):
    """Floating Point Operations per core cycle."""
    flops = FLOP_Count(self, EV, level)
    return flops / CORE_CLKS(self, EV, level)
def ILP(self, EV, level):
    """Average uops executed per cycle in which any uop executed (per thread)."""
    executed = EV("UOPS_DISPATCHED.THREAD", level)
    return executed / Execute_Cycles(self, EV, level)
def CORE_CLKS(self, EV, level):
    """Core clocks while any Logical Processor of the physical core is active."""
    if ebs_mode:
        # Reconstruct core clocks from per-thread clocks and the
        # single-thread-active / reference-clock ratio.
        thread = EV("CPU_CLK_UNHALTED.THREAD", level)
        one_thread = EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level)
        ref_xclk = EV("CPU_CLK_UNHALTED.REF_XCLK", level)
        return (thread / 2) * (1 + one_thread / ref_xclk)
    if smt_enabled:
        return EV("CPU_CLK_UNHALTED.THREAD_ANY", level) / 2
    return CLKS(self, EV, level)
def Instructions(self, EV, level):
    """Total retired instruction count."""
    return EV("INST_RETIRED.ANY", level)
def Retire(self, EV, level):
    """Average uops retired per cycle in which at least one uop retired."""
    slots = Retired_Slots(self, EV, level)
    return slots / EV("UOPS_RETIRED.RETIRE_SLOTS:c1", level)
def DSB_Coverage(self, EV, level):
    """Fraction of fetched uops delivered by the DSB (decoded icache).

    Flagged when coverage is below 70% while the workload runs at high IPC."""
    dsb_uops = EV("IDQ.DSB_UOPS", level)
    val = dsb_uops / Fetched_Uops(self, EV, level)
    self.thresh = (val < 0.7) and HighIPC(self, EV, 1)
    return val
def CPU_Utilization(self, EV, level):
    """Average CPU utilization: busy CPUs over all logical processors."""
    utilized = CPUs_Utilized(self, EV, level)
    return utilized / Num_CPUs(self, EV, level)
def CPUs_Utilized(self, EV, level):
    """Average number of busy CPUs: unhalted reference cycles over TSC ticks."""
    ref_cycles = EV("CPU_CLK_UNHALTED.REF_TSC", level)
    return ref_cycles / EV("msr/tsc/", 0)
def Core_Frequency(self, EV, level):
    """Measured average unhalted core frequency in GHz."""
    turbo = Turbo_Utilization(self, EV, level)
    return turbo * EV("msr/tsc/", 0) / OneBillion / Time(self, EV, level)
def Uncore_Frequency(self, EV, level):
    """Measured average uncore (SoC) frequency in GHz."""
    socket_clks = Socket_CLKS(self, EV, level)
    return socket_clks / 1e9 / Time(self, EV, level)
def GFLOPs(self, EV, level):
    """Giga floating point operations per second over the run duration."""
    gflops = FLOP_Count(self, EV, level) / OneBillion
    return gflops / Time(self, EV, level)
def Turbo_Utilization(self, EV, level):
    """Average frequency relative to nominal: core clocks over reference clocks."""
    clks = CLKS(self, EV, level)
    return clks / EV("CPU_CLK_UNHALTED.REF_TSC", level)
# Fraction of cycles where both hardware Logical Processors were active
def SMT_2T_Utilization(self, EV, level):
    """Fraction of cycles where both SMT hardware threads of a core were active; 0 when SMT is disabled."""
    return 1 - EV("CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", level) / (EV("CPU_CLK_UNHALTED.REF_XCLK_ANY", level) / 2) if smt_enabled else 0
# Fraction of cycles spent in the Operating System (OS) Kernel mode
def Kernel_Utilization(self, EV, level):
    """Fraction of cycles spent in OS kernel (supervisor) mode; flagged above 5%."""
    sup_cycles = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level)
    all_cycles = EV("CPU_CLK_UNHALTED.THREAD", level)
    ratio = sup_cycles / all_cycles
    self.thresh = ratio > 0.05
    return ratio
# Cycles Per Instruction for the Operating System (OS) Kernel mode
def Kernel_CPI(self, EV, level):
    """Cycles Per Instruction while in OS kernel (supervisor) mode."""
    kernel_cycles = EV("CPU_CLK_UNHALTED.THREAD_P:SUP", level)
    kernel_insts = EV("INST_RETIRED.ANY_P:SUP", level)
    return kernel_cycles / kernel_insts
# Average external Memory Bandwidth Use for reads and writes [GB / sec]
def DRAM_BW_Use(self, EV, level):
    """Average external memory bandwidth for reads + writes in GB/s (64 bytes per CAS transfer)."""
    return (64 *(EV("UNC_M_CAS_COUNT.RD", level) + EV("UNC_M_CAS_COUNT.WR", level)) / OneBillion) / Time(self, EV, level)
# Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches.
def MEM_Read_Latency(self, EV, level):
    """Average latency of data reads to external memory in nanoseconds (demand loads + L1/L2 prefetches).

    TOR occupancy per insert approximates latency in uncore clocks; dividing by
    the socket clock rate (clocks/second) converts it to seconds, scaled to ns.
    """
    return OneBillion *(EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:Match=0x182", level) / EV("UNC_C_TOR_INSERTS.MISS_OPCODE:Match=0x182", level)) / (Socket_CLKS(self, EV, level) / Time(self, EV, level))
# Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches
def MEM_Parallel_Reads(self, EV, level):
    """Average number of parallel data read requests outstanding to external memory."""
    occupancy = EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:Match=0x182", level)
    active_cycles = EV("UNC_C_TOR_OCCUPANCY.MISS_OPCODE:Match=0x182:c1", level)
    return occupancy / active_cycles
# Run duration time in seconds
def Time(self, EV, level):
    """Run duration of the measurement in seconds; intervals shorter than 1s are flagged as noisy."""
    seconds = EV("interval-s", 0)
    self.thresh = seconds < 1
    return seconds
# Socket actual clocks when any core is active on that socket
def Socket_CLKS(self, EV, level):
    """Socket clock ticks while any core on that socket is active."""
    ticks = EV("UNC_C_CLOCKTICKS:one_unit", level)
    return ticks
# Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]
def IpFarBranch(self, EV, level):
    """Instructions retired per far branch (lower value = far branches occur more often)."""
    insts = EV("INST_RETIRED.ANY", level)
    far_branches = EV("BR_INST_RETIRED.FAR_BRANCH:USER", level)
    ratio = insts / far_branches
    self.thresh = ratio < 1000000
    return ratio
# Event groups
class Frontend_Bound:
    """TMA level-1 node: fraction of issue slots where the Frontend undersupplies the Backend.

    Attributes (name, domain, level, metricgroup, ...) are introspected by the
    toplev framework; compute() fills self.val and self.thresh from raw events.
    """
    name = "Frontend_Bound"
    domain = "Slots"
    area = "FE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvFB', 'BvIO', 'TmaL1', 'PGO'])
    maxval = None
    def compute(self, EV):
        # Slots where the frontend delivered no uops although the backend could accept them.
        try:
            self.val = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Frontend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where the
processor's Frontend undersupplies its Backend. Frontend
denotes the first part of the processor core responsible to
fetch operations that are executed later on by the Backend
part. Within the Frontend; a branch predictor predicts the
next address to fetch; cache-lines are fetched from the
memory subsystem; parsed into instructions; and lastly
decoded into micro-operations (uops). Ideally the Frontend
can issue Pipeline_Width uops every cycle to the Backend.
Frontend Bound denotes unutilized issue-slots when there is
no Backend stall; i.e. bubbles where Frontend delivered no
uops while Backend could have accepted them. For example;
stalls due to instruction-cache misses would be categorized
under Frontend Bound."""
class Fetch_Latency:
    """TMA level-2 node (under Frontend_Bound): slots stalled on Frontend latency issues."""
    name = "Fetch_Latency"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = ['RS_EVENTS.EMPTY_END']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Cycles with no frontend delivery scaled to slot units by the pipeline width.
        try:
            self.val = Pipeline_Width * Frontend_Latency_Cycles(self, EV, 2) / SLOTS(self, EV, 2)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Fetch_Latency zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend latency issues. For example; instruction-
cache misses; iTLB misses or fetch stalls after a branch
misprediction are categorized under Frontend Latency. In
such cases; the Frontend eventually delivers no uops for
some period."""
class ITLB_Misses:
    """TMA level-3 node: cycles stalled on instruction TLB misses."""
    name = "ITLB_Misses"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['ITLB_MISSES.WALK_COMPLETED']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BigFootprint', 'BvBC', 'FetchLat', 'MemoryTLB'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ITLB_Miss_Cycles(self, EV, 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "ITLB_Misses zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Instruction TLB (ITLB) misses.. Consider
large 2M pages for code (selectively prefer hot large-size
function, due to limited 2M entries). Linux options:
standard binaries use libhugetlbfs; Hfsort.. https://github.
com/libhugetlbfs/libhugetlbfs;https://research.fb.com/public
ations/optimizing-function-placement-for-large-scale-data-
center-applications-2/"""
class Branch_Resteers:
    """TMA level-3 node: cycles lost re-steering the fetch unit after mispredictions/clears."""
    name = "Branch_Resteers"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        # Fixed per-event cost (BAClear_Cost) times the count of resteer-causing events.
        try:
            self.val = BAClear_Cost *(EV("BR_MISP_RETIRED.ALL_BRANCHES", 3) + EV("MACHINE_CLEARS.COUNT", 3) + EV("BACLEARS.ANY", 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Resteers zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to Branch Resteers. Branch Resteers estimates
the Frontend delay in fetching operations from corrected
path; following all sorts of miss-predicted branches. For
example; branchy code with lots of miss-predictions might
get categorized under Branch Resteers. Note the value of
this node may overlap with its siblings."""
class MS_Switches:
    """TMA level-3 node: estimated cycles lost switching to the Microcode Sequencer."""
    name = "MS_Switches"
    domain = "Clocks_Estimated"
    area = "FE"
    level = 3
    htoff = False
    sample = ['IDQ.MS_SWITCHES']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat', 'MicroSeq'])
    maxval = 1.0
    def compute(self, EV):
        # Each MS switch is charged a fixed penalty of MS_Switches_Cost cycles.
        try:
            self.val = MS_Switches_Cost * EV("IDQ.MS_SWITCHES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MS_Switches zero division")
        return self.val
    desc = """
This metric estimates the fraction of cycles when the CPU
was stalled due to switches of uop delivery to the Microcode
Sequencer (MS). Commonly used instructions are optimized for
delivery by the DSB (decoded i-cache) or MITE (legacy
instruction decode) pipelines. Certain operations cannot be
handled natively by the execution pipeline; and must be
performed by microcode (small programs injected into the
execution stream). Switching to the MS too often can
negatively impact performance. The MS is designated to
deliver long uop flows required by CISC instructions like
CPUID; or uncommon conditions like Floating Point Assists
when dealing with Denormals."""
class LCP:
    """TMA level-3 node: cycles stalled on Length Changing Prefixes during decode."""
    name = "LCP"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("ILD_STALL.LCP", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "LCP zero division")
        return self.val
    desc = """
This metric represents fraction of cycles CPU was stalled
due to Length Changing Prefixes (LCPs). Using proper
compiler flags or Intel Compiler by default will certainly
avoid this."""
class DSB_Switches:
    """TMA level-3 node: cycles of exposed penalty from DSB-to-MITE pipeline switches."""
    name = "DSB_Switches"
    domain = "Clocks"
    area = "FE"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['DSBmiss', 'FetchLat'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("DSB2MITE_SWITCHES.PENALTY_CYCLES", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DSB_Switches zero division")
        return self.val
    desc = """
This metric represents fraction of cycles the CPU was
stalled due to switches from DSB to MITE pipelines. The DSB
(decoded i-cache) is a Uop Cache where the front-end
directly delivers Uops (micro operations) avoiding heavy x86
decoding. The DSB pipeline has shorter latency and delivered
higher bandwidth than the MITE (legacy instruction decode
pipeline). Switching between the two pipelines can cause
penalties hence this metric measures the exposed penalty..
See section 'Optimization for Decoded Icache' in
Optimization Manual:. http://www.intel.com/content/www/us/en
/architecture-and-technology/64-ia-32-architectures-
optimization-manual.html"""
class Fetch_Bandwidth:
    """TMA level-2 node: Frontend bandwidth stalls, derived as Frontend_Bound minus Fetch_Latency."""
    name = "Fetch_Bandwidth"
    domain = "Slots"
    area = "FE"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['FetchBW', 'Frontend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # Residual: whatever part of Frontend_Bound is not latency is bandwidth.
        try:
            self.val = self.Frontend_Bound.compute(EV) - self.Fetch_Latency.compute(EV)
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Fetch_Bandwidth zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was stalled
due to Frontend bandwidth issues. For example;
inefficiencies at the instruction decoders; or restrictions
for caching in the DSB (decoded uops cache) are categorized
under Fetch Bandwidth. In such cases; the Frontend typically
delivers suboptimal amount of uops to the Backend."""
class Bad_Speculation:
    """TMA level-1 node: slots wasted on incorrect speculation (bogus uops + recovery bubbles)."""
    name = "Bad_Speculation"
    domain = "Slots"
    area = "BAD"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['TmaL1'])
    maxval = None
    def compute(self, EV):
        # (issued - retired) uops plus recovery cycles scaled to slots by pipeline width.
        try:
            self.val = (EV("UOPS_ISSUED.ANY", 1) - Retired_Slots(self, EV, 1) + Pipeline_Width * Recovery_Cycles(self, EV, 1)) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.15)
        except ZeroDivisionError:
            handle_error(self, "Bad_Speculation zero division")
        return self.val
    desc = """
This category represents fraction of slots wasted due to
incorrect speculations. This include slots used to issue
uops that do not eventually get retired and slots for which
the issue-pipeline was blocked due to recovery from earlier
incorrect speculation. For example; wasted work due to miss-
predicted branches are categorized under Bad Speculation
category. Incorrect data speculation followed by Memory
Ordering Nukes is another example."""
class Branch_Mispredicts:
    """TMA level-2 node: the branch-misprediction share of Bad_Speculation."""
    name = "Branch_Mispredicts"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['BR_MISP_RETIRED.ALL_BRANCHES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BrMispredicts', 'BvMP', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Mispred_Clears_Fraction(self, EV, 2) * self.Bad_Speculation.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Branch_Mispredicts zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Branch Misprediction. These slots are either wasted
by uops fetched from an incorrectly speculated program path;
or stalls when the out-of-order part of the machine needs to
recover its state from a speculative path.. Using profile
feedback in the compiler may help. Please see the
Optimization Manual for general strategies for addressing
branch misprediction issues..
http://www.intel.com/content/www/us/en/architecture-and-
technology/64-ia-32-architectures-optimization-manual.html"""
class Machine_Clears:
    """TMA level-2 node: machine-clear share of Bad_Speculation (residual after mispredicts)."""
    name = "Machine_Clears"
    domain = "Slots"
    area = "BAD"
    level = 2
    htoff = False
    sample = ['MACHINE_CLEARS.COUNT']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BadSpec', 'BvMS', 'MachineClears', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Bad_Speculation.compute(EV) - self.Branch_Mispredicts.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Machine_Clears zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU has wasted
due to Machine Clears. These slots are either wasted by
uops fetched prior to the clear; or stalls the out-of-order
portion of the machine needs to recover its state after the
clear. For example; this can happen due to memory ordering
Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code
(SMC) nukes.. See \"Memory Disambiguation\" in Optimization
Manual and:. https://software.intel.com/sites/default/files/
m/d/4/1/d/8/sma.pdf"""
class Backend_Bound:
    """TMA level-1 node: slots stalled for lack of Backend resources (residual of the other L1 nodes)."""
    name = "Backend_Bound"
    domain = "Slots"
    area = "BE"
    level = 1
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvOB', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        # The four level-1 categories sum to 1; Backend_Bound is what remains.
        try:
            self.val = 1 -(self.Frontend_Bound.compute(EV) + self.Bad_Speculation.compute(EV) + self.Retiring.compute(EV))
            self.thresh = (self.val > 0.2)
        except ZeroDivisionError:
            handle_error(self, "Backend_Bound zero division")
        return self.val
    desc = """
This category represents fraction of slots where no uops are
being delivered due to a lack of required resources for
accepting new uops in the Backend. Backend is the portion of
the processor core where the out-of-order scheduler
dispatches ready uops into their respective execution units;
and once completed these uops get retired according to
program order. For example; stalls due to data-cache misses
or stalls due to the divider unit being overloaded are both
categorized under Backend Bound. Backend Bound is further
divided into two main categories: Memory Bound and Core
Bound."""
class Memory_Bound:
    """TMA level-2 node: the memory-subsystem share of Backend_Bound."""
    name = "Memory_Bound"
    domain = "Slots"
    area = "BE/Mem"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Memory_Bound_Fraction(self, EV, 2) * self.Backend_Bound.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Memory_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots the Memory
subsystem within the Backend was a bottleneck. Memory Bound
estimates fraction of slots where pipeline is likely stalled
due to demand load or store instructions. This accounts
mainly for (1) non-completed in-flight memory demand loads
which coincides with execution units starvation; in addition
to (2) cases where stores could impose backpressure on the
pipeline when many of them get buffered at the same time
(less common out of the two)."""
class DTLB_Load:
    """TMA level-4 node: estimated cycles where loads missed the first-level data TLB."""
    name = "DTLB_Load"
    domain = "Clocks_Estimated"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = ['MEM_UOPS_RETIRED.STLB_MISS_LOADS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMT', 'MemoryTLB'])
    maxval = 1.0
    def compute(self, EV):
        # STLB hits get a fixed cost; STLB misses are charged their page-walk duration.
        try:
            self.val = (Mem_STLB_Hit_Cost * EV("DTLB_LOAD_MISSES.STLB_HIT", 4) + EV("DTLB_LOAD_MISSES.WALK_DURATION", 4)) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DTLB_Load zero division")
        return self.val
    desc = """
This metric roughly estimates the fraction of cycles where
the Data TLB (DTLB) was missed by load accesses. TLBs
(Translation Look-aside Buffers) are processor caches for
recently used entries out of the Page Tables that are used
to map virtual- to physical-addresses by the operating
system. This metric approximates the potential delay of
demand loads missing the first-level data TLB (assuming
worst case scenario with back to back misses to different
pages). This includes hitting in the second-level TLB (STLB)
as well as performing a hardware page walk on an STLB miss.."""
class L3_Bound:
    """TMA level-3 node: stalls on loads served by L3 cache (L2 misses that hit L3)."""
    name = "L3_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.LLC_HIT:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['CacheHits', 'MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        # L2-miss stall cycles split by the fraction of misses that hit in L3.
        try:
            self.val = Mem_L3_Hit_Fraction(self, EV, 3) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "L3_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled due to
loads accesses to L3 cache or contended with a sibling Core.
Avoiding cache misses (i.e. L2 misses/L3 hits) can improve
the latency and increase performance."""
class DRAM_Bound:
    """TMA level-3 node: stalls on loads served by external memory (L3 misses)."""
    name = "DRAM_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_LOAD_UOPS_RETIRED.LLC_MISS:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = 1.0
    def compute(self, EV):
        # Complement of L3_Bound: L2-miss stall cycles attributed to L3 misses.
        try:
            self.val = (1 - Mem_L3_Hit_Fraction(self, EV, 3)) * EV("CYCLE_ACTIVITY.STALLS_L2_PENDING", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "DRAM_Bound zero division")
        return self.val
    desc = """
This metric estimates how often the CPU was stalled on
accesses to external memory (DRAM) by loads. Better caching
can improve the latency and increase performance."""
class MEM_Bandwidth:
    """TMA level-4 node: cycles likely limited by external-memory bandwidth."""
    name = "MEM_Bandwidth"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvMB', 'MemoryBW', 'Offcore'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = ORO_DRD_BW_Cycles(self, EV, 4) / CLKS(self, EV, 4)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Bandwidth zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the core's
performance was likely hurt due to approaching bandwidth
limits of external memory - DRAM ([SPR-HBM] and/or HBM).
The underlying heuristic assumes that a similar off-core
traffic is generated by all IA cores. This metric does not
aggregate non-data-read requests by this logical processor;
requests from other IA Logical Processors/Physical
Cores/sockets; or other non-IA devices like GPU; hence the
maximum external memory bandwidth limits may or may not be
approached when this metric is flagged (see Uncore counters
for that).. Improve data accesses to reduce cacheline
transfers from/to memory. Examples: 1) Consume all bytes of
a each cacheline before it is evicted (e.g. reorder
structure elements and split non-hot ones), 2) merge
computed-limited with BW-limited loops, 3) NUMA
optimizations in multi-socket system. Note: software
prefetches will not help BW-limited application.."""
class MEM_Latency:
    """TMA level-4 node: cycles likely limited by external-memory latency (bandwidth excluded)."""
    name = "MEM_Latency"
    domain = "Clocks"
    area = "BE/Mem"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvML', 'MemoryLat', 'Offcore'])
    maxval = None
    def compute(self, EV):
        # All outstanding-read cycles minus the bandwidth-saturated part.
        try:
            self.val = ORO_DRD_Any_Cycles(self, EV, 4) / CLKS(self, EV, 4) - self.MEM_Bandwidth.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "MEM_Latency zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles where the
performance was likely hurt due to latency from external
memory - DRAM ([SPR-HBM] and/or HBM). This metric does not
aggregate requests from other Logical Processors/Physical
Cores/sockets (see Uncore counters for that).. Improve data
accesses or interleave them with compute. Examples: 1) Data
layout re-structuring, 2) Software Prefetches (also through
the compiler).."""
class Store_Bound:
    """TMA level-3 node: cycles stalled on the store buffer being full (RFO stores)."""
    name = "Store_Bound"
    domain = "Stalls"
    area = "BE/Mem"
    level = 3
    htoff = False
    sample = ['MEM_UOPS_RETIRED.ALL_STORES:pp']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MemoryBound', 'TmaL3mem'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = EV("RESOURCE_STALLS.SB", 3) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Store_Bound zero division")
        return self.val
    desc = """
This metric estimates how often CPU was stalled due to RFO
store memory accesses; RFO store issue a read-for-ownership
request before the write. Even though store accesses do not
typically stall out-of-order CPUs; there are few cases where
stores can lead to actual stalls. This metric will be
flagged should RFO stores be a bottleneck."""
class Core_Bound:
    """TMA level-2 node: non-memory Backend bottlenecks (Backend_Bound minus Memory_Bound)."""
    name = "Core_Bound"
    domain = "Slots"
    area = "BE/Core"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Backend', 'TmaL2', 'Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Backend_Bound.compute(EV) - self.Memory_Bound.compute(EV)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Core_Bound zero division")
        return self.val
    desc = """
This metric represents fraction of slots where Core non-
memory issues were of a bottleneck. Shortage in hardware
compute resources; or dependencies in software's
instructions are both categorized under Core Bound. Hence it
may indicate the machine ran out of an out-of-order
resource; certain execution units are overloaded or
dependencies in program's data- or instruction-flow are
limiting the performance (e.g. FP-chained long-latency
arithmetic operations).. Tip: consider Port Saturation
analysis as next step."""
class Divider:
    """TMA level-3 node: cycles where the FP divider unit was active."""
    name = "Divider"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = ['ARITH.FPU_DIV_ACTIVE']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvCB'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = EV("ARITH.FPU_DIV_ACTIVE", 3) / CORE_CLKS(self, EV, 3)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Divider zero division")
        return self.val
    desc = """
This metric represents fraction of cycles where the Divider
unit was active. Divide and square root instructions are
performed by the Divider unit and can take considerably
longer latency than integer or Floating Point addition;
subtraction; or multiplication."""
class Ports_Utilization:
    """TMA level-3 node: cycles limited by execution-port pressure / low ILP (non-divider)."""
    name = "Ports_Utilization"
    domain = "Clocks"
    area = "BE/Core"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['PortsUtil'])
    maxval = None
    def compute(self, EV):
        # Backend stall cycles minus store-buffer and memory stalls leaves core-compute stalls.
        try:
            self.val = (Backend_Bound_Cycles(self, EV, 3) - EV("RESOURCE_STALLS.SB", 3) - STALLS_MEM_ANY(self, EV, 3)) / CLKS(self, EV, 3)
            self.thresh = (self.val > 0.15) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Ports_Utilization zero division")
        return self.val
    desc = """
This metric estimates fraction of cycles the CPU performance
was potentially limited due to Core computation issues (non
divider-related). Two distinct categories can be attributed
into this metric: (1) heavy data-dependency among contiguous
instructions would manifest in this metric - such cases are
often referred to as low Instruction Level Parallelism
(ILP). (2) Contention on some hardware execution unit other
than Divider. For example; when there are too many multiply
operations.. Loop Vectorization -most compilers feature
auto-Vectorization options today- reduces pressure on the
execution ports as multiple elements are calculated with
same uop."""
class Retiring:
    """TMA level-1 node: slots doing useful work, i.e. issued uops that eventually retire."""
    name = "Retiring"
    domain = "Slots"
    area = "RET"
    level = 1
    htoff = False
    sample = ['UOPS_RETIRED.RETIRE_SLOTS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['BvUW', 'TmaL1'])
    maxval = None
    def compute(self, EV):
        # Also flagged when Heavy_Operations fires, so heavy retirement is surfaced too.
        try:
            self.val = Retired_Slots(self, EV, 1) / SLOTS(self, EV, 1)
            self.thresh = (self.val > 0.7) or self.Heavy_Operations.thresh
        except ZeroDivisionError:
            handle_error(self, "Retiring zero division")
        return self.val
    desc = """
This category represents fraction of slots utilized by
useful work i.e. issued uops that eventually get retired.
Ideally; all pipeline slots would be attributed to the
Retiring category. Retiring of 100% would indicate the
maximum Pipeline_Width throughput was achieved. Maximizing
Retiring typically increases the Instructions-per-cycle (see
IPC metric). Note that a high Retiring value does not
necessary mean there is no room for more performance. For
example; Heavy-operations or Microcode Assists are
categorized under Retiring. They often indicate suboptimal
performance and can often be optimized or avoided. . A high
Retiring value for non-vectorized code may be a good hint
for programmer to consider vectorizing his code. Doing so
essentially lets more computations be done without
significantly increasing number of instructions thus
improving the performance."""
class Light_Operations:
    """TMA level-2 node: retirement of single-uop instructions (Retiring minus Heavy_Operations)."""
    name = "Light_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = ['INST_RETIRED.PREC_DIST']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.Retiring.compute(EV) - self.Heavy_Operations.compute(EV)
            self.thresh = (self.val > 0.6)
        except ZeroDivisionError:
            handle_error(self, "Light_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring light-weight operations -- instructions that
require no more than one uop (micro-operation). This
correlates with total number of instructions used by the
program. A uops-per-instruction (see UopPI metric) ratio of
1 or less should be expected for decently optimized code
running on Intel Core/Xeon products. While this often
indicates efficient X86 instructions were executed; high
value does not necessarily mean better performance cannot be
achieved. . Focus on techniques that reduce instruction
count or result in more efficient instructions generation
such as vectorization."""
class FP_Arith:
    """TMA level-3 node: overall floating-point arithmetic uop fraction (x87 + scalar + vector)."""
    name = "FP_Arith"
    domain = "Uops"
    area = "RET"
    level = 3
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['HPC'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = self.X87_Use.compute(EV) + self.FP_Scalar.compute(EV) + self.FP_Vector.compute(EV)
            self.thresh = (self.val > 0.2) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Arith zero division")
        return self.val
    desc = """
This metric represents overall arithmetic floating-point
(FP) operations fraction the CPU has executed (retired).
Note this metric's value may exceed its parent due to use of
\"Uops\" CountDomain and FMA double-counting."""
class X87_Use:
    """TMA level-4 node: approximation of legacy x87 floating-point usage."""
    name = "X87_Use"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = Retired_Slots(self, EV, 4) * EV("FP_COMP_OPS_EXE.X87", 4) / EV("UOPS_DISPATCHED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "X87_Use zero division")
        return self.val
    desc = """
This metric serves as an approximation of legacy x87 usage.
It accounts for instructions beyond X87 FP arithmetic
operations; hence may be used as a thermometer to avoid X87
high usage and preferably upgrade to modern ISA. See Tip
under Tuning Hint.. Tip: consider compiler flags to generate
newer AVX (or SSE) instruction sets; which typically perform
better and feature vectors."""
class FP_Scalar:
    """TMA level-4 node: fraction of dispatched uops that are scalar FP arithmetic."""
    name = "FP_Scalar"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = None
    def compute(self, EV):
        try:
            self.val = FP_Arith_Scalar(self, EV, 4) / EV("UOPS_DISPATCHED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Scalar zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
scalar uops fraction the CPU has retired. May overcount due
to FMA double counting.. Investigate what limits (compiler)
generation of vector code."""
class FP_Vector:
    """TMA level-4 node: fraction of dispatched uops that are vector FP arithmetic (all widths)."""
    name = "FP_Vector"
    domain = "Uops"
    area = "RET"
    level = 4
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = FP_Arith_Vector(self, EV, 4) / EV("UOPS_DISPATCHED.THREAD", 4)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector zero division")
        return self.val
    desc = """
This metric approximates arithmetic floating-point (FP)
vector uops fraction the CPU has retired aggregated across
all vector widths. May overcount due to FMA double
counting.. Check if vector width is expected"""
class FP_Vector_128b:
    """TMA level-5 node: fraction of dispatched uops that are 128-bit-wide FP vector arithmetic."""
    name = "FP_Vector_128b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", 5) + EV("FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE", 5)) / EV("UOPS_DISPATCHED.THREAD", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_128b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 128-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class FP_Vector_256b:
    """TMA level-5 node: fraction of dispatched uops that are 256-bit-wide FP vector arithmetic."""
    name = "FP_Vector_256b"
    domain = "Uops"
    area = "RET"
    level = 5
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Compute', 'Flops'])
    maxval = 1.0
    def compute(self, EV):
        try:
            self.val = (EV("SIMD_FP_256.PACKED_DOUBLE", 5) + EV("SIMD_FP_256.PACKED_SINGLE", 5)) / EV("UOPS_DISPATCHED.THREAD", 5)
            self.thresh = (self.val > 0.1) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "FP_Vector_256b zero division")
        return self.val
    desc = """
This metric approximates arithmetic FP vector uops fraction
the CPU has retired for 256-bit wide vectors. May overcount
due to FMA double counting.. Try to exploit wider vector
length"""
class Heavy_Operations:
    """TMA level-2 node: retirement of multi-uop / micro-coded operations (here: MS uops only)."""
    name = "Heavy_Operations"
    domain = "Slots"
    area = "RET"
    level = 2
    htoff = False
    sample = []
    errcount = 0
    sibling = None
    metricgroup = frozenset(['Retire', 'TmaL2'])
    maxval = None
    def compute(self, EV):
        # On this model the only measured heavy-op source is the Microcode Sequencer.
        try:
            self.val = self.Microcode_Sequencer.compute(EV)
            self.thresh = (self.val > 0.1)
        except ZeroDivisionError:
            handle_error(self, "Heavy_Operations zero division")
        return self.val
    desc = """
This metric represents fraction of slots where the CPU was
retiring heavy-weight operations -- instructions that
require two or more uops or micro-coded sequences. This
highly-correlates with the uop length of these
instructions/sequences."""
class Microcode_Sequencer:
    """TMA level-3 node: slots retiring uops fetched by the Microcode Sequencer (MS)."""
    name = "Microcode_Sequencer"
    domain = "Slots"
    area = "RET"
    level = 3
    htoff = False
    sample = ['IDQ.MS_UOPS']
    errcount = 0
    sibling = None
    metricgroup = frozenset(['MicroSeq'])
    maxval = None
    def compute(self, EV):
        # MS uops scaled by the retired fraction, normalized to total slots.
        try:
            self.val = Retire_Fraction(self, EV, 3) * EV("IDQ.MS_UOPS", 3) / SLOTS(self, EV, 3)
            self.thresh = (self.val > 0.05) and self.parent.thresh
        except ZeroDivisionError:
            handle_error(self, "Microcode_Sequencer zero division")
        return self.val
    desc = """
This metric represents fraction of slots the CPU was
retiring uops fetched by the Microcode Sequencer (MS) unit.
The MS is used for CISC instructions not supported by the
default decoders (like repeat move strings; or CPUID); or by
microcode assists used to address some operation modes (like
in Floating Point assists). These cases can often be
avoided.."""
class Metric_IPC:
    """Info metric: Instructions Per Cycle per Logical Processor (always reported)."""
    name = "IPC"
    domain = "Metric"
    maxval = Pipeline_Width + 2
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Ret', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IPC(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "IPC zero division")
    desc = """
Instructions Per Cycle (per Logical Processor)"""
class Metric_UopPI:
    """Info metric: Uops Per Instruction; flagged above 1.05 (hints at heavy/micro-coded ops)."""
    name = "UopPI"
    domain = "Metric"
    maxval = 2.0
    errcount = 0
    area = "Info.Thread"
    metricgroup = frozenset(['Pipeline', 'Ret', 'Retire'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = UopPI(self, EV, 0)
            self.thresh = (self.val > 1.05)
        except ZeroDivisionError:
            handle_error_metric(self, "UopPI zero division")
    desc = """
Uops Per Instruction"""
class Metric_CPI:
name = "CPI"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['Pipeline', 'Mem'])
sibling = None
def compute(self, EV):
try:
self.val = CPI(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CPI zero division")
desc = """
Cycles Per Instruction (per Logical Processor)"""
class Metric_CLKS:
name = "CLKS"
domain = "Count"
maxval = 0
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['Pipeline'])
sibling = None
def compute(self, EV):
try:
self.val = CLKS(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CLKS zero division")
desc = """
Per-Logical Processor actual clocks when the Logical
Processor is active."""
class Metric_SLOTS:
name = "SLOTS"
domain = "Count"
maxval = 0
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['TmaL1'])
sibling = None
def compute(self, EV):
try:
self.val = SLOTS(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "SLOTS zero division")
desc = """
Total issue-pipeline slots (per-Physical Core till ICL; per-
Logical Processor ICL onward)"""
class Metric_Execute_per_Issue:
name = "Execute_per_Issue"
domain = "Metric"
maxval = 0
errcount = 0
area = "Info.Thread"
metricgroup = frozenset(['Cor', 'Pipeline'])
sibling = None
def compute(self, EV):
try:
self.val = Execute_per_Issue(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Execute_per_Issue zero division")
desc = """
The ratio of Executed- by Issued-Uops. Ratio > 1 suggests
high rate of uop micro-fusions. Ratio < 1 suggest high rate
of \"execute\" at rename stage."""
class Metric_CoreIPC:
name = "CoreIPC"
domain = "Core_Metric"
maxval = Pipeline_Width + 2
errcount = 0
area = "Info.Core"
metricgroup = frozenset(['Ret', 'SMT', 'TmaL1'])
sibling = None
def compute(self, EV):
try:
self.val = CoreIPC(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CoreIPC zero division")
desc = """
Instructions Per Cycle across hyper-threads (per physical
core)"""
class Metric_FLOPc:
name = "FLOPc"
domain = "Core_Metric"
maxval = 10.0
errcount = 0
area = "Info.Core"
metricgroup = frozenset(['Ret', 'Flops'])
sibling = None
def compute(self, EV):
try:
self.val = FLOPc(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "FLOPc zero division")
desc = """
Floating Point Operations Per Cycle"""
class Metric_ILP:
name = "ILP"
domain = "Metric"
maxval = Exe_Ports
errcount = 0
area = "Info.Core"
metricgroup = frozenset(['Backend', 'Cor', 'Pipeline', 'PortsUtil'])
sibling = None
def compute(self, EV):
try:
self.val = ILP(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "ILP zero division")
desc = """
Instruction-Level-Parallelism (average number of uops
executed when there is execution) per thread (logical-
processor)"""
class Metric_CORE_CLKS:
name = "CORE_CLKS"
domain = "Count"
maxval = 0
errcount = 0
area = "Info.Core"
metricgroup = frozenset(['SMT'])
sibling = None
def compute(self, EV):
try:
self.val = CORE_CLKS(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "CORE_CLKS zero division")
desc = """
Core actual clocks when any Logical Processor is active on
the Physical Core"""
class Metric_Instructions:
name = "Instructions"
domain = "Count"
maxval = 0
errcount = 0
area = "Info.Inst_Mix"
metricgroup = frozenset(['Summary', 'TmaL1'])
sibling = None
def compute(self, EV):
try:
self.val = Instructions(self, EV, 0)
self.thresh = True
except ZeroDivisionError:
handle_error_metric(self, "Instructions zero division")
desc = """
Total number of retired Instructions"""
# --- Informative metrics, continued (auto-generated) ----------------------
# Same structure as above: compute() fills self.val / self.thresh.
class Metric_Retire:
    name = "Retire"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.Pipeline"
    metricgroup = frozenset(['Pipeline', 'Ret'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Retire(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Retire zero division")
    desc = """
Average number of Uops retired in cycles where at least one
uop has retired."""

class Metric_DSB_Coverage:
    name = "DSB_Coverage"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.Frontend"
    metricgroup = frozenset(['DSB', 'Fed', 'FetchBW'])
    sibling = None
    def compute(self, EV):
        # flagged only when coverage is low while the workload is high-IPC
        try:
            self.val = DSB_Coverage(self, EV, 0)
            self.thresh = (self.val < 0.7) and HighIPC(self, EV, 1)
        except ZeroDivisionError:
            handle_error_metric(self, "DSB_Coverage zero division")
    desc = """
Fraction of Uops delivered by the DSB (aka Decoded ICache;
or Uop Cache). See section 'Decoded ICache' in Optimization
Manual. http://www.intel.com/content/www/us/en/architecture-
and-technology/64-ia-32-architectures-optimization-
manual.html"""

class Metric_CPU_Utilization:
    name = "CPU_Utilization"
    domain = "Metric"
    maxval = 1
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['HPC', 'Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPU_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPU_Utilization zero division")
    desc = """
Average CPU Utilization (percentage)"""

class Metric_CPUs_Utilized:
    name = "CPUs_Utilized"
    domain = "Metric"
    maxval = 300
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = CPUs_Utilized(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "CPUs_Utilized zero division")
    desc = """
Average number of utilized CPUs"""

class Metric_Core_Frequency:
    name = "Core_Frequency"
    domain = "SystemMetric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary', 'Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Core_Frequency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Core_Frequency zero division")
    desc = """
Measured Average Core Frequency for unhalted processors
[GHz]"""

class Metric_Uncore_Frequency:
    name = "Uncore_Frequency"
    domain = "SystemMetric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Uncore_Frequency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Uncore_Frequency zero division")
    desc = """
Measured Average Uncore Frequency for the SoC [GHz]"""

class Metric_GFLOPs:
    name = "GFLOPs"
    domain = "Metric"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Cor', 'Flops', 'HPC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = GFLOPs(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "GFLOPs zero division")
    desc = """
Giga Floating Point Operations Per Second. Aggregate across
all supported options of: FP precisions, scalar and vector
instructions, vector-width"""

class Metric_Turbo_Utilization:
    name = "Turbo_Utilization"
    domain = "Core_Metric"
    maxval = 10.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Power'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Turbo_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Turbo_Utilization zero division")
    desc = """
Average Frequency Utilization relative nominal frequency"""

class Metric_SMT_2T_Utilization:
    name = "SMT_2T_Utilization"
    domain = "Core_Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['SMT'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = SMT_2T_Utilization(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "SMT_2T_Utilization zero division")
    desc = """
Fraction of cycles where both hardware Logical Processors
were active"""

class Metric_Kernel_Utilization:
    name = "Kernel_Utilization"
    domain = "Metric"
    maxval = 1.0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['OS'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Kernel_Utilization(self, EV, 0)
            self.thresh = (self.val > 0.05)
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_Utilization zero division")
    desc = """
Fraction of cycles spent in the Operating System (OS) Kernel
mode"""

class Metric_Kernel_CPI:
    name = "Kernel_CPI"
    domain = "Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['OS'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Kernel_CPI(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Kernel_CPI zero division")
    desc = """
Cycles Per Instruction for the Operating System (OS) Kernel
mode"""

class Metric_DRAM_BW_Use:
    name = "DRAM_BW_Use"
    domain = "GB/sec"
    maxval = 200
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['HPC', 'MemOffcore', 'MemoryBW', 'SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = DRAM_BW_Use(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "DRAM_BW_Use zero division")
    desc = """
Average external Memory Bandwidth Use for reads and writes
[GB / sec]"""

class Metric_MEM_Read_Latency:
    name = "MEM_Read_Latency"
    domain = "NanoSeconds"
    maxval = 1000
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Mem', 'MemoryLat', 'SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = MEM_Read_Latency(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_Read_Latency zero division")
    desc = """
Average latency of data read request to external memory (in
nanoseconds). Accounts for demand loads and L1/L2
prefetches."""

class Metric_MEM_Parallel_Reads:
    name = "MEM_Parallel_Reads"
    domain = "SystemMetric"
    maxval = 100
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Mem', 'MemoryBW', 'SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = MEM_Parallel_Reads(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "MEM_Parallel_Reads zero division")
    desc = """
Average number of parallel data read requests to external
memory. Accounts for demand loads and L1/L2 prefetches"""

class Metric_Time:
    name = "Time"
    domain = "Seconds"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Summary'])
    sibling = None
    def compute(self, EV):
        # flagged when the run was too short (< 1s) for reliable numbers
        try:
            self.val = Time(self, EV, 0)
            self.thresh = (self.val < 1)
        except ZeroDivisionError:
            handle_error_metric(self, "Time zero division")
    desc = """
Run duration time in seconds"""

class Metric_Socket_CLKS:
    name = "Socket_CLKS"
    domain = "Count"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['SoC'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = Socket_CLKS(self, EV, 0)
            self.thresh = True
        except ZeroDivisionError:
            handle_error_metric(self, "Socket_CLKS zero division")
    desc = """
Socket actual clocks when any core is active on that socket"""

class Metric_IpFarBranch:
    name = "IpFarBranch"
    domain = "Inst_Metric"
    maxval = 0
    errcount = 0
    area = "Info.System"
    metricgroup = frozenset(['Branches', 'OS'])
    sibling = None
    def compute(self, EV):
        try:
            self.val = IpFarBranch(self, EV, 0)
            self.thresh = (self.val < 1000000)
        except ZeroDivisionError:
            handle_error_metric(self, "IpFarBranch zero division")
    desc = """
Instructions per Far Branch ( Far Branches apply upon
transition from application to operating system, handling
interrupts, exceptions) [lower number means higher
occurrence rate]"""
# Schedule
class Setup:
    # Auto-generated wiring of the metric tree: instantiates every node,
    # registers it with the runner, then connects parent, cross-group
    # reference, and sibling links. Order matters: nodes must exist in
    # the dict before links to them are assigned.
    def __init__(self, r):
        o = dict()
        n = Frontend_Bound() ; r.run(n) ; o["Frontend_Bound"] = n
        n = Fetch_Latency() ; r.run(n) ; o["Fetch_Latency"] = n
        n = ITLB_Misses() ; r.run(n) ; o["ITLB_Misses"] = n
        n = Branch_Resteers() ; r.run(n) ; o["Branch_Resteers"] = n
        n = MS_Switches() ; r.run(n) ; o["MS_Switches"] = n
        n = LCP() ; r.run(n) ; o["LCP"] = n
        n = DSB_Switches() ; r.run(n) ; o["DSB_Switches"] = n
        n = Fetch_Bandwidth() ; r.run(n) ; o["Fetch_Bandwidth"] = n
        n = Bad_Speculation() ; r.run(n) ; o["Bad_Speculation"] = n
        n = Branch_Mispredicts() ; r.run(n) ; o["Branch_Mispredicts"] = n
        n = Machine_Clears() ; r.run(n) ; o["Machine_Clears"] = n
        n = Backend_Bound() ; r.run(n) ; o["Backend_Bound"] = n
        n = Memory_Bound() ; r.run(n) ; o["Memory_Bound"] = n
        n = DTLB_Load() ; r.run(n) ; o["DTLB_Load"] = n
        n = L3_Bound() ; r.run(n) ; o["L3_Bound"] = n
        n = DRAM_Bound() ; r.run(n) ; o["DRAM_Bound"] = n
        n = MEM_Bandwidth() ; r.run(n) ; o["MEM_Bandwidth"] = n
        n = MEM_Latency() ; r.run(n) ; o["MEM_Latency"] = n
        n = Store_Bound() ; r.run(n) ; o["Store_Bound"] = n
        n = Core_Bound() ; r.run(n) ; o["Core_Bound"] = n
        n = Divider() ; r.run(n) ; o["Divider"] = n
        n = Ports_Utilization() ; r.run(n) ; o["Ports_Utilization"] = n
        n = Retiring() ; r.run(n) ; o["Retiring"] = n
        n = Light_Operations() ; r.run(n) ; o["Light_Operations"] = n
        n = FP_Arith() ; r.run(n) ; o["FP_Arith"] = n
        n = X87_Use() ; r.run(n) ; o["X87_Use"] = n
        n = FP_Scalar() ; r.run(n) ; o["FP_Scalar"] = n
        n = FP_Vector() ; r.run(n) ; o["FP_Vector"] = n
        n = FP_Vector_128b() ; r.run(n) ; o["FP_Vector_128b"] = n
        n = FP_Vector_256b() ; r.run(n) ; o["FP_Vector_256b"] = n
        n = Heavy_Operations() ; r.run(n) ; o["Heavy_Operations"] = n
        n = Microcode_Sequencer() ; r.run(n) ; o["Microcode_Sequencer"] = n

        # parents
        o["Fetch_Latency"].parent = o["Frontend_Bound"]
        o["ITLB_Misses"].parent = o["Fetch_Latency"]
        o["Branch_Resteers"].parent = o["Fetch_Latency"]
        o["MS_Switches"].parent = o["Fetch_Latency"]
        o["LCP"].parent = o["Fetch_Latency"]
        o["DSB_Switches"].parent = o["Fetch_Latency"]
        o["Fetch_Bandwidth"].parent = o["Frontend_Bound"]
        o["Branch_Mispredicts"].parent = o["Bad_Speculation"]
        o["Machine_Clears"].parent = o["Bad_Speculation"]
        o["Memory_Bound"].parent = o["Backend_Bound"]
        o["DTLB_Load"].parent = o["Memory_Bound"]
        o["L3_Bound"].parent = o["Memory_Bound"]
        o["DRAM_Bound"].parent = o["Memory_Bound"]
        o["MEM_Bandwidth"].parent = o["DRAM_Bound"]
        o["MEM_Latency"].parent = o["DRAM_Bound"]
        o["Store_Bound"].parent = o["Memory_Bound"]
        o["Core_Bound"].parent = o["Backend_Bound"]
        o["Divider"].parent = o["Core_Bound"]
        o["Ports_Utilization"].parent = o["Core_Bound"]
        o["Light_Operations"].parent = o["Retiring"]
        o["FP_Arith"].parent = o["Light_Operations"]
        o["X87_Use"].parent = o["FP_Arith"]
        o["FP_Scalar"].parent = o["FP_Arith"]
        o["FP_Vector"].parent = o["FP_Arith"]
        o["FP_Vector_128b"].parent = o["FP_Vector"]
        o["FP_Vector_256b"].parent = o["FP_Vector"]
        o["Heavy_Operations"].parent = o["Retiring"]
        o["Microcode_Sequencer"].parent = o["Heavy_Operations"]

        # user visible metrics
        n = Metric_IPC() ; r.metric(n) ; o["IPC"] = n
        n = Metric_UopPI() ; r.metric(n) ; o["UopPI"] = n
        n = Metric_CPI() ; r.metric(n) ; o["CPI"] = n
        n = Metric_CLKS() ; r.metric(n) ; o["CLKS"] = n
        n = Metric_SLOTS() ; r.metric(n) ; o["SLOTS"] = n
        n = Metric_Execute_per_Issue() ; r.metric(n) ; o["Execute_per_Issue"] = n
        n = Metric_CoreIPC() ; r.metric(n) ; o["CoreIPC"] = n
        n = Metric_FLOPc() ; r.metric(n) ; o["FLOPc"] = n
        n = Metric_ILP() ; r.metric(n) ; o["ILP"] = n
        n = Metric_CORE_CLKS() ; r.metric(n) ; o["CORE_CLKS"] = n
        n = Metric_Instructions() ; r.metric(n) ; o["Instructions"] = n
        n = Metric_Retire() ; r.metric(n) ; o["Retire"] = n
        n = Metric_DSB_Coverage() ; r.metric(n) ; o["DSB_Coverage"] = n
        n = Metric_CPU_Utilization() ; r.metric(n) ; o["CPU_Utilization"] = n
        n = Metric_CPUs_Utilized() ; r.metric(n) ; o["CPUs_Utilized"] = n
        n = Metric_Core_Frequency() ; r.metric(n) ; o["Core_Frequency"] = n
        n = Metric_Uncore_Frequency() ; r.metric(n) ; o["Uncore_Frequency"] = n
        n = Metric_GFLOPs() ; r.metric(n) ; o["GFLOPs"] = n
        n = Metric_Turbo_Utilization() ; r.metric(n) ; o["Turbo_Utilization"] = n
        n = Metric_SMT_2T_Utilization() ; r.metric(n) ; o["SMT_2T_Utilization"] = n
        n = Metric_Kernel_Utilization() ; r.metric(n) ; o["Kernel_Utilization"] = n
        n = Metric_Kernel_CPI() ; r.metric(n) ; o["Kernel_CPI"] = n
        n = Metric_DRAM_BW_Use() ; r.metric(n) ; o["DRAM_BW_Use"] = n
        n = Metric_MEM_Read_Latency() ; r.metric(n) ; o["MEM_Read_Latency"] = n
        n = Metric_MEM_Parallel_Reads() ; r.metric(n) ; o["MEM_Parallel_Reads"] = n
        n = Metric_Time() ; r.metric(n) ; o["Time"] = n
        n = Metric_Socket_CLKS() ; r.metric(n) ; o["Socket_CLKS"] = n
        n = Metric_IpFarBranch() ; r.metric(n) ; o["IpFarBranch"] = n

        # references between groups
        o["Fetch_Bandwidth"].Frontend_Bound = o["Frontend_Bound"]
        o["Fetch_Bandwidth"].Fetch_Latency = o["Fetch_Latency"]
        o["Branch_Mispredicts"].Bad_Speculation = o["Bad_Speculation"]
        o["Machine_Clears"].Bad_Speculation = o["Bad_Speculation"]
        o["Machine_Clears"].Branch_Mispredicts = o["Branch_Mispredicts"]
        o["Backend_Bound"].Retiring = o["Retiring"]
        o["Backend_Bound"].Bad_Speculation = o["Bad_Speculation"]
        o["Backend_Bound"].Frontend_Bound = o["Frontend_Bound"]
        o["Memory_Bound"].Retiring = o["Retiring"]
        o["Memory_Bound"].Bad_Speculation = o["Bad_Speculation"]
        o["Memory_Bound"].Frontend_Bound = o["Frontend_Bound"]
        o["Memory_Bound"].Backend_Bound = o["Backend_Bound"]
        o["Memory_Bound"].Fetch_Latency = o["Fetch_Latency"]
        o["MEM_Latency"].MEM_Bandwidth = o["MEM_Bandwidth"]
        o["Core_Bound"].Retiring = o["Retiring"]
        o["Core_Bound"].Frontend_Bound = o["Frontend_Bound"]
        o["Core_Bound"].Memory_Bound = o["Memory_Bound"]
        o["Core_Bound"].Backend_Bound = o["Backend_Bound"]
        o["Core_Bound"].Bad_Speculation = o["Bad_Speculation"]
        o["Core_Bound"].Fetch_Latency = o["Fetch_Latency"]
        o["Ports_Utilization"].Fetch_Latency = o["Fetch_Latency"]
        o["Retiring"].Heavy_Operations = o["Heavy_Operations"]
        o["Light_Operations"].Retiring = o["Retiring"]
        o["Light_Operations"].Heavy_Operations = o["Heavy_Operations"]
        o["Light_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]
        o["FP_Arith"].FP_Scalar = o["FP_Scalar"]
        o["FP_Arith"].X87_Use = o["X87_Use"]
        o["FP_Arith"].FP_Vector = o["FP_Vector"]
        o["Heavy_Operations"].Microcode_Sequencer = o["Microcode_Sequencer"]

        # siblings cross-tree
        o["MS_Switches"].sibling = (o["Machine_Clears"], o["Microcode_Sequencer"],)
        o["LCP"].sibling = (o["DSB_Switches"], o["Fetch_Bandwidth"],)
        o["DSB_Switches"].sibling = (o["LCP"], o["Fetch_Bandwidth"],)
        o["Fetch_Bandwidth"].sibling = (o["LCP"], o["DSB_Switches"],)
        o["Machine_Clears"].sibling = (o["MS_Switches"], o["Microcode_Sequencer"],)
        o["FP_Scalar"].sibling = (o["FP_Vector"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
        o["FP_Vector"].sibling = (o["FP_Scalar"], o["FP_Vector_128b"], o["FP_Vector_256b"],)
        o["FP_Vector_128b"].sibling = (o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_256b"],)
        o["FP_Vector_256b"].sibling = (o["FP_Scalar"], o["FP_Vector"], o["FP_Vector_128b"],)
        o["Microcode_Sequencer"].sibling = (o["MS_Switches"], o["Machine_Clears"],)
        o["DSB_Coverage"].sibling = (o["LCP"], o["DSB_Switches"], o["Fetch_Bandwidth"],)
        o["DRAM_BW_Use"].sibling = (o["MEM_Bandwidth"],)
| 67,078 | Python | .py | 1,671 | 34.648713 | 306 | 0.666247 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,896 | interval-normalize.py | andikleen_pmu-tools/interval-normalize.py | #!/usr/bin/env python3
# convert perf stat -Ixxx -x, / toplev -Ixxx -x, output to normalized output
# this version buffers all data in memory, so it can use a lot of memory.
# t1,ev1,num1
# t1,ev2,num1
# t2,ev1,num3
# ->
# timestamp,ev1,ev2
# t1,num1,num2
# t2,num3,,
# when the input has CPU generate separate lines for each CPU (may need post filtering)
from __future__ import print_function
import sys
import csv
import argparse
import collections
import csv_formats
# Command line setup.
ap = argparse.ArgumentParser(description=
'Normalize CSV data from perf or toplev. All values are printed on a single line.')
ap.add_argument('inputfile', type=argparse.FileType('r'), default=sys.stdin, nargs='?')
ap.add_argument('--output', '-o', type=argparse.FileType('w'), default=sys.stdout, nargs='?')
ap.add_argument('--cpu', nargs='?', help='Only output for this cpu')
ap.add_argument('--na', nargs='?', help='Value to use if data is not available', default="")
ap.add_argument('--error-exit', action='store_true', help='Force error exit on parse error')
ap.add_argument('--normalize-cpu', action='store_true', help='Normalize CPUs into unique columns too')
args = ap.parse_args()

# Parser state:
#   events maps event name -> column index in each output row,
#   out / times / cpus accumulate one entry per finished output row,
#   res is the row currently being assembled.
# (removed unused printed_header flag)
timestamp = None
events = collections.OrderedDict()
out = []
times = []
cpus = []
rc = csv.reader(args.inputfile)
res = []
writer = csv.writer(args.output, lineterminator='\n')
lastcpu = None
cpu = None
lineno = 1
for row in rc:
    # skip repeated header lines and comment lines
    if len(row) > 0 and (row[0] == "Timestamp" or row[0].startswith("#")):
        lineno += 1
        continue
    r = csv_formats.parse_csv_row(row, error_exit=args.error_exit)
    if r is None:
        print("at line %d" % lineno, file=sys.stderr)
        lineno += 1
        continue
    ts, cpu, ev, val = r.ts, r.cpu, r.ev, r.val
    # A new timestamp (or a new CPU, when CPUs get separate output lines)
    # finishes the row being assembled.
    if ts != timestamp or (cpu != lastcpu and not args.normalize_cpu):
        if timestamp:
            if args.cpu and cpu != args.cpu:
                continue
            # delay in case we didn't see all headers
            # only need to do that for toplev, directly output for perf?
            # could limit buffering to save memory?
            out.append(res)
            times.append(timestamp)
            cpus.append(cpu)
            res = []
        timestamp = ts
        lastcpu = cpu
    if cpu is not None and args.normalize_cpu:
        # fold the CPU into the event name so each CPU gets its own column
        ev = cpu + " " + ev
    # use a list for row storage to keep memory requirements down
    if ev not in events:
        events[ev] = len(res)
    ind = events[ev]
    if ind >= len(res):
        res += [None] * ((ind + 1) - len(res))
    res[ind] = val
    lineno += 1
# flush the last buffered row
if res and not (args.cpu and cpu != args.cpu):
    out.append(res)
    times.append(timestamp)
    cpus.append(cpu)
def resolve(row, ind):
    """Return the stored value at column *ind* of a buffered row, or
    args.na when the row is short or the slot was never filled."""
    if ind < len(row):
        value = row[ind]
        if value is not None:
            return value
    return args.na
def cpulist():
    """Header column(s) for the CPU field: empty when CPUs are folded
    into per-CPU event columns or when no CPU field was seen."""
    if not args.normalize_cpu and cpu is not None:
        return ["CPU"]
    return []
# Emit the normalized table: one header line, then one line per
# (timestamp, cpu) with every event's value (or --na placeholders).
keys = events.keys()
writer.writerow(["Timestamp"] + cpulist() + list(keys))
for row, ts, cpunum in zip(out, times, cpus):
    writer.writerow([ts] +
            ([cpunum] if (cpu is not None and not args.normalize_cpu) else []) +
            ([resolve(row, events[x]) for x in keys]))
| 3,252 | Python | .py | 94 | 29.712766 | 102 | 0.639568 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,897 | tsx_metrics.py | andikleen_pmu-tools/tsx_metrics.py | #
# TSX metrics
#
# XXX force all these into a single group
# XXX: force % in caller
import os
def TXCycles(EV, level):
    """Fraction of all core cycles that were transactional (cycles-t / cycles)."""
    in_tx = EV("cpu/cycles-t/", level)
    total = EV("cycles", level)
    return in_tx / total
class TransactionalCycles:
    """Percent of cycles spent inside TSX transactions."""
    name = "Transactional cycles"
    desc = """
Percent cycles spent in a transaction. When low or zero either the program
does not use locks (or other transactions), or the locks are not enabled with lock elision."""
    subplot = "TSX"
    unit = "%"
    sample = ["mem_uops_retired.lock_loads"]
    server = True

    def compute(self, EV):
        """Fill self.val (percent) and self.thresh from the event reader EV."""
        try:
            self.val = TXCycles(EV, 1) * 100.
            self.thresh = self.val >= 0.01
        except ZeroDivisionError:
            self.val = 0
            self.thresh = False
class AbortedCycles:
    """Percent of cycles wasted in aborted transactions."""
    name = "Aborted cycles"
    desc = """
Percent cycles wasted in transaction aborts. When a significant part of the transactional cycles
start sampling for abort causes."""
    subplot = "TSX"
    unit = "%"
    sample = ["cpu/tx-abort/pp", "cpu/hle-abort/pp"]
    server = True

    def compute(self, EV):
        """val = (transactional - committed) cycles as percent of all cycles."""
        try:
            wasted = EV("cpu/cycles-t/", 1) - EV("cpu/cycles-ct/", 1)
            self.val = (wasted / EV("cycles", 1)) * 100.
            self.thresh = self.val >= 0.01
        except ZeroDivisionError:
            self.val = 0
            self.thresh = False
class AverageRTM:
    """Average RTM transaction length in cycles."""
    name = "Average RTM transaction length"
    desc = """
Average RTM transaction length. Assumes most transactions are RTM.
When low consider increasing the size of the critical sections to lower overhead."""
    subplot = "TSX Latencies"
    unit = "cycles"
    server = True

    def compute(self, EV):
        """val = transactional cycles / started RTM transactions."""
        try:
            tx_cycles = EV("cpu/cycles-t/", 1)
            self.val = tx_cycles / EV("RTM_RETIRED.START", 1)
            self.thresh = TXCycles(EV, 1) >= 0.01 and self.val > 0
        except ZeroDivisionError:
            self.val = 0
            self.thresh = False
class AverageHLE:
    """Average HLE transaction length in cycles."""
    name = "Average HLE transaction length"
    desc = """
Average HLE transaction length. Assumes most transactions are HLE.
When low consider increasing the size of the critical sections to lower overhead."""
    subplot = "TSX Latencies"
    unit = "cycles"

    def compute(self, EV):
        """val = transactional cycles / started HLE transactions."""
        try:
            tx_cycles = EV("cpu/cycles-t/", 1)
            self.val = tx_cycles / EV("HLE_RETIRED.START", 1)
            self.thresh = TXCycles(EV, 1) >= 0.01 and self.val > 0
        except ZeroDivisionError:
            self.val = 0
            self.thresh = False
class Setup:
    # Register the TSX metrics with the runner, but only when the kernel
    # exposes the transactional cycles event (i.e. TSX is present/enabled).
    def __init__(self, r):
        # XXX allow override
        if os.path.exists("/sys/devices/cpu/events/cycles-t"):
            r.force_metric(TransactionalCycles())
            r.force_metric(AbortedCycles())
            r.force_metric(AverageRTM())
            #r.force_metric(AverageHLE())
| 2,765 | Python | .py | 77 | 29.246753 | 100 | 0.622015 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,898 | genretlat.py | andikleen_pmu-tools/genretlat.py | #!/usr/bin/env python3
# -*- coding: utf-8
# generate return latencies to tune toplev model
# Copyright (c) 2023, Intel Corporation
# Author: Andi Kleen
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
from __future__ import print_function
import subprocess as subp
import os
import sys
import ocperf
import json
import argparse
import random
from dummyarith import DummyArith
from collections import Counter, defaultdict
from copy import copy
import csv
def cmd(args, l):
    """Return the command line *l* unchanged, echoing it first when
    --verbose was given."""
    if args.verbose:
        print(" ".join(l))
    return l
class SamplePerfRun(object):
    """Run perf record to collect Timed PEBS information and generate averages for event weights."""
    def __init__(self, args):
        self.pi = self.pr = self.ps = None
        self.args = args
    def execute(self, perf, evsamples, origevents, pargs):
        # Start a perf record | perf inject | perf script pipeline connected
        # with pipes (no perf.data file); perf script prints timestamp,
        # retirement latency and event name per sample.
        self.perf = perf
        # map the resolved perf event strings back to original event names
        self.rmap = { x: y for x, y in zip(evsamples, origevents) }
        pr = subp.Popen(cmd(self.args, [perf, "record",
                                        "-W",
                                        "-B",
                                        "-c", "%d" % self.args.interval,
                                        "-e", ",".join(evsamples),
                                        "-o", "-"] + pargs),
                        stdout=subp.PIPE)
        pi = subp.Popen(cmd(self.args, [perf, "inject"]),
                        stdin=pr.stdout,
                        stdout=subp.PIPE)
        ps = subp.Popen(cmd(self.args, [perf, "script",
                                        "-i", "-",
                                        "--ns",
                                        "-F", "time,retire_lat,event"]),
                        stdin=pi.stdout,
                        stdout=subp.PIPE)
        pi.stdout.close()
        pr.stdout.close()
        self.pr, self.pi, self.ps = pr, pi, ps
    def handle_samples(self):
        # Yield (event, weight) for every sample line from perf script,
        # optionally mirroring rows into the --csv plot file; waits for the
        # pipeline to finish afterwards.
        for l in self.ps.stdout:
            l = l.decode()
            n = l.split()
            ts, event, weight = float(n[0].replace(":","")), n[1].replace(":",""), int(n[2])
            if self.args.csvplot:
                self.args.csvplot.writerow([ts, self.rmap[event] if event in self.rmap else event, weight])
            if self.args.fake:
                # test mode: substitute random weights
                yield event, random.randint(0, 100)
            else:
                yield event, weight
        self.pr.wait()
        self.pi.wait()
        self.ps.wait()
NUM_SPARKS = 8   # number of sample buckets shown in a sparkline
SPARK_SHIFT = 8  # fixed-point shift used when scaling buckets

def spark_f(l):
    """Return (scale factor, minimum) mapping values of *l* onto the
    fixed-point sparkline bucket range (factor is at least 1)."""
    lo = min(l)
    span = (max(l) - lo) << SPARK_SHIFT
    return int(max(span / (NUM_SPARKS - 1), 1)), lo
# this samples unlike normal spark lines
def gen_spark_buckets(l):
    # Down-sample l to at most NUM_SPARKS random values and scale each into
    # the fixed-point range; returns (buckets, minimum, scale factor).
    if len(l) == 0:
        return [], 0, 1
    f, min_ = spark_f(l)
    return [((x - min_) << SPARK_SHIFT) / f for x in random.sample(l, min(NUM_SPARKS, len(l)))], min_, f
def lookup(s, i, fb):
    """Return s[i] when i is a valid (non-negative) index, else the
    fallback fb."""
    return s[i] if 0 <= i < len(s) else fb
def gen_spark(buckets, min_, f):
    # Render buckets as unicode block glyphs; out-of-range indices become
    # a space via lookup()'s fallback.
    # NOTE(review): buckets coming from gen_spark_buckets were already
    # scaled by f, and this re-applies (x - min_) << SPARK_SHIFT / f —
    # confirm the double scaling against human_output's call site.
    return "".join([lookup("▁▂▃▄▅▆▇█", int((int((x - min_)) << SPARK_SHIFT) / f), " ") for x in buckets])
def gen_stat(samples):
    # Summarize one event's weight samples into a statistics dict; the
    # NZ_* fields exclude zero weights. Example output:
    # {
    # "COUNT": 5358917,
    # "MIN": 0,
    # "MAX": 65535,
    # "MEAN": 3.23,
    # "MEDIAN": 0,
    # "NZ_MEDIAN": 1,
    # "MODE": 0,
    # "MODE_COUNT": 3631698,
    # "NZ_MODE": 1,
    # "NZ_MODE_COUNT": 1213029,
    # "BUCKETS": 2344
    # },
    nz = [x for x in samples if x != 0.0]
    buckets = Counter(samples)
    nz_buckets = copy(buckets)
    if 0 in nz_buckets:
        del nz_buckets[0]
    spark, min_, f = gen_spark_buckets(samples)
    spark_nz, min_nz, f_nz = gen_spark_buckets(nz)
    return {
        "COUNT": len(samples),
        "MIN": min(samples),
        "MAX": max(samples),
        "MEAN": round(float(sum(samples)) / len(samples), 2),
        "MEDIAN": sorted(samples)[len(samples)//2] if len(samples) > 0 else 0.0,
        "NZ_MEDIAN": sorted(nz)[len(nz)//2] if len(nz) > 0 else 0.0,
        "MODE": buckets.most_common(1)[0][0] if len(buckets) > 0 else 0.0,
        "MODE_COUNT": buckets.most_common(1)[0][1] if len(buckets) > 0 else 0,
        "NZ_MODE": nz_buckets.most_common(1)[0][0] if len(nz_buckets) > 0 else 0,
        "NZ_MODE_COUNT": nz_buckets.most_common(1)[0][1] if len(nz_buckets) > 0 else 0,
        "BUCKETS": len(buckets),
        "F": f,
        "F_NZ": f_nz,
        "MIN_NZ": min_nz,
        "SPARK_BUCKETS": ",".join(["%d" % x for x in spark]),
        "SPARK_BUCKETS_NZ": ",".join(["%d" % x for x in spark_nz]),
    }
def human_output(data):
    # Pretty-print the per-event statistics on one line each, rendering the
    # SPARK_* fields as unicode sparklines.
    d = data["Data"]
    for ev in sorted(d.keys()):
        print("%s: " % ev, end="")
        for m in sorted(d[ev].keys()):
            if m.startswith("SPARK"):
                if d[ev][m] == "":
                    continue
                l = [(int(x) if x.isdecimal() else 0) for x in d[ev][m].split(",")]
                s = gen_spark(l, d[ev]["MIN"], d[ev]["F_NZ" if m.endswith("_NZ") else "F"])
                print("%s %s " % (m.lower(), s), end="")
            else:
                print("%s %s " % (m.lower(), d[ev][m]), end="")
        print()
def get_model_number():
    """Return the CPU model number parsed from /proc/cpuinfo, or 0 when
    no "model :" line is found."""
    with open("/proc/cpuinfo") as f:
        for line in f:
            fields = line.split()
            if len(fields) >= 3 and fields[:2] == ["model", ":"]:
                return int(fields[2])
    return 0
def find_model(args):
    # Determine the CPU type (mtl/gnr) from sysfs when not given with
    # --cpu, then import and return the matching ratios module.
    if not args.cpu:
        cpu = open("/sys/devices/cpu/caps/pmu_name").read().strip()
        if cpu == "meteorlake_hybrid":
            args.cpu = "mtl"
        elif cpu == "sapphire_rapids":
            model = get_model_number()
            # NOTE(review): models 0xad/0xae are treated as Granite Rapids
            # even though the PMU reports sapphire_rapids — confirm.
            if model == 0xad or model == 0xae:
                args.cpu = "gnr"
            else:
                sys.exit("Unsupported CPU %d" % model)
        elif cpu == "granite_rapids":
            args.cpu = "gnr"
        else:
            sys.exit("Unsupported CPU %s" % cpu)
    if args.cpu == "mtl":
        import mtl_rwc_ratios
        return mtl_rwc_ratios
    elif args.cpu == "gnr":
        import gnr_server_ratios
        return gnr_server_ratios
    sys.exit("Unknown cpu %s" % args.cpu)
def gen_events(args):
    # Build the model's node tree with a stub runner, then evaluate every
    # node with a collecting EV callback: events requested at level 999
    # (presumably the marker for retirement-latency probes — confirm in
    # the ratios modules) are gathered with a :pp suffix and returned
    # sorted.
    model = find_model(args)
    nodes = []
    class Runner(object):
        def metric(self, n):
            nodes.append(n)
            n.thresh = True
        def run(self, n):
            nodes.append(n)
            n.thresh = True
    model.Setup(Runner())
    events = set()
    def collect(name, level):
        if level == 999:
            events.add(name + ":pp")
        # DummyArith absorbs all arithmetic done on the returned value
        return DummyArith()
    for n in nodes:
        n.compute(collect)
    return sorted(events)
def getevent(emap, e):
    """Resolve event name *e* through the event map; return its perf
    event string, or "dummy" (with a diagnostic) when unknown."""
    resolved = emap.getevent(e)
    if resolved is None:
        print(e, "not found")
        return "dummy"
    return resolved.output()
def init_args():
    # Parse known options; unknown trailing arguments are returned in
    # `rest` and later passed through to the perf record command line.
    ap = argparse.ArgumentParser('Generate JSON of retirement latencies to tune toplev')
    ap.add_argument('--output', '-o', type=argparse.FileType('w'), default=sys.stdout,
                    help="")
    ap.add_argument('--verbose', '-v', action='store_true', help='be verbose')
    ap.add_argument('--fake', action='store_true', help=argparse.SUPPRESS)
    ap.add_argument('--interval', '-i', type=int, default=103, help="Interval for sampling")
    ap.add_argument('--pmu', '-p', nargs='*', default=["cpu", "cpu_core"], help="for which PMUs to collect")
    ap.add_argument('--quiet', '-q', action='store_true')
    ap.add_argument('--csv', '-c', type=argparse.FileType('w'), help="Generate CSV file with pushout latencies", default=None)
    ap.add_argument('--cpu', help="Set CPU type (gnr, mtl)")
    args, rest = ap.parse_known_args()
    # attach the optional CSV writer to args so callees can use it directly
    if args.csv:
        args.csvplot = csv.writer(args.csv)
    else:
        args.csvplot = None
    return args, rest
def main():
    """Collect Timed-PEBS retirement latencies with perf and dump
    per-event statistics as JSON (plus a human readable summary)."""
    args, rest = init_args()
    events = gen_events(args)
    pmus = ocperf.find_pmus()
    # use the event map of the first non-atom PMU
    emap = ocperf.find_emap(pmu=[x for x in pmus if x != "cpu_atom"][0])
    if emap is None:
        sys.exit("Cannot find json event list")
    pevents = [getevent(emap, e) for e in events]
    if args.verbose:
        print(events)
    s = SamplePerfRun(args)
    perf = os.getenv("PERF")
    if perf is None:
        perf = "perf"
    s.execute(perf, pevents, events, rest)
    samples = defaultdict(list)
    for ev, weight in s.handle_samples():
        samples[ev].append(weight)
    # normalize the perf event syntax back into plain event names for the
    # JSON keys (strip pmu prefix, slashes, modifiers)
    data = { "Data": { ev.upper().replace("CPU_CORE","").replace("/","").replace(":","").replace("RETIRED_", "RETIRED."): gen_stat(s)
                       for ev, s in samples.items()
                       if "/" not in ev or any([x in ev for x in args.pmu]) } }
    json.dump(data, args.output, indent=2, sort_keys=True)
    if not args.quiet:
        human_output(data)

if __name__ == '__main__':
    main()
| 9,118 | Python | .py | 239 | 29.485356 | 133 | 0.541629 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,899 | simple_ratios.py | andikleen_pmu-tools/simple_ratios.py | #
# Simple 5 event top level model
#
# Hooks overridden by the toplev runner (PEP 8 E731: use def, not
# lambda assignment, so the callables have useful names in tracebacks).
def print_error(msg):
    """Error sink hook; the default discards the message."""
    return False

version = "1.0"

def check_event(ev):
    """Event-validity hook; the default rejects every event."""
    return False

# Constants
PipelineWidth = 4  # issue/retire slots per cycle
uops_retired_slots = "UOPS_RETIRED.RETIRE_SLOTS"
def CLKS(EV):
    """Return unhalted core clock cycles via the EV event accessor."""
    cycles = EV("CPU_CLK_UNHALTED.THREAD", 1)
    return cycles
def SLOTS(EV):
    """Return total issue slots: unhalted cycles times the machine width."""
    return CLKS(EV) * PipelineWidth
def IPC(EV, level):
    """Instructions Per Cycle: retired instructions over unhalted cycles."""
    instructions = EV("INST_RETIRED.ANY", level)
    return instructions / CLKS(EV)
def UPI(EV, level):
    """Uops Per Instruction: retired uop slots over retired instructions."""
    uops = EV(uops_retired_slots, level)
    instructions = EV("INST_RETIRED.ANY", level)
    return uops / instructions
def TurboUtilization(EV, level):
    """Average frequency utilization relative to the nominal frequency."""
    cycles = CLKS(EV)
    reference = EV("CPU_CLK_UNHALTED.REF_TSC", level)
    return cycles / reference
class FrontendBound:
    name = "Frontend Bound"
    domain = "Slots"
    desc = """
This category reflects slots where the Frontend of the processor undersupplies
its Backend."""
    level = 1

    def compute(self, EV):
        """Set self.val to the fraction of slots lost to front-end starvation."""
        try:
            undelivered = EV("IDQ_UOPS_NOT_DELIVERED.CORE", 1)
            self.val = undelivered / SLOTS(EV)
        except ZeroDivisionError:
            self.val = 0
            self.thresh = False
        else:
            self.thresh = self.val > 0.2
        return self.val
class BadSpeculation:
    name = "Bad Speculation"
    domain = "Slots"
    desc = """
This category reflects slots wasted due to incorrect speculations, which
include slots used to allocate uops that do not eventually get retired and
slots for which allocation was blocked due to recovery from earlier incorrect
speculation. For example, wasted work due to miss-predicted branches is
categorized under the Bad Speculation category"""
    level = 1

    def compute(self, EV):
        """Set self.val to the fraction of slots wasted on bad speculation."""
        try:
            issued = EV("UOPS_ISSUED.ANY", 1)
            retired = EV(uops_retired_slots, 1)
            recovery = PipelineWidth * EV("INT_MISC.RECOVERY_CYCLES", 1)
            self.val = (issued - retired + recovery) / SLOTS(EV)
            self.thresh = self.val > 0.1
        except ZeroDivisionError:
            self.val = 0
            self.thresh = False
        return self.val
class BackendBound:
    name = "Backend Bound"
    domain = "Slots"
    desc = """
This category reflects slots where no uops are being delivered due to a lack
of required resources for accepting more uops in the Backend of the pipeline."""
    level = 1

    def compute(self, EV):
        """Derive Backend Bound as the remainder of the top-level slots.

        The sibling node objects (FrontendBound, BadSpeculation, Retiring)
        are attached to this instance by Setup.
        """
        try:
            accounted = (self.FrontendBound.compute(EV)
                         + self.BadSpeculation.compute(EV)
                         + self.Retiring.compute(EV))
            self.val = 1 - accounted
            self.thresh = self.val > 0.2
        except ZeroDivisionError:
            self.val = 0
            self.thresh = False
        return self.val
class Retiring:
    name = "Retiring"
    domain = "Slots"
    desc = """
This category reflects slots utilized by good uops i.e. allocated uops that
eventually get retired."""
    level = 1

    def compute(self, EV):
        """Set self.val to the fraction of slots filled by retiring uops."""
        try:
            retired = EV(uops_retired_slots, 1)
            self.val = retired / SLOTS(EV)
        except ZeroDivisionError:
            self.val = 0
            self.thresh = False
        else:
            self.thresh = self.val > 0.7
        return self.val
class Metric_IPC:
    name = "IPC"
    desc = """
Instructions Per Cycle"""
    errcount = 0

    def compute(self, EV):
        """Evaluate the IPC metric into self.val (0 on divide-by-zero)."""
        try:
            self.val = IPC(EV, 0)
        except ZeroDivisionError:
            # Record the failure so the driver can flag unreliable output.
            print_error("IPC zero division")
            self.errcount += 1
            self.val = 0
class Metric_UPI:
    name = "UPI"
    desc = """
Uops Per Instruction"""
    errcount = 0

    def compute(self, EV):
        """Evaluate the UPI metric into self.val (0 on divide-by-zero)."""
        try:
            self.val = UPI(EV, 0)
        except ZeroDivisionError:
            # Record the failure so the driver can flag unreliable output.
            print_error("UPI zero division")
            self.errcount += 1
            self.val = 0
class Metric_TurboUtilization:
    name = "TurboUtilization"
    desc = """
Average Frequency Utilization relative nominal frequency"""
    errcount = 0

    def compute(self, EV):
        """Evaluate turbo utilization into self.val (0 on divide-by-zero)."""
        try:
            self.val = TurboUtilization(EV, 0)
        except ZeroDivisionError:
            # Record the failure so the driver can flag unreliable output.
            print_error("TurboUtilization zero division")
            self.errcount += 1
            self.val = 0
class Setup:
    """Build the model node tree and register nodes/metrics with runner *r*."""

    def __init__(self, r):
        # Prefer the dedicated retire-slots event when this CPU has it.
        if check_event("UOPS_RETIRED.SLOTS"):
            global uops_retired_slots
            uops_retired_slots = "UOPS_RETIRED.SLOTS"

        # Instantiate and register each top-level node, preserving order.
        nodes = {}
        for klass in (FrontendBound, BadSpeculation, BackendBound, Retiring):
            node = klass()
            r.run(node)
            nodes[klass.__name__] = node

        # All four nodes are top level: no parent, no sibling, empty samples.
        for node in nodes.values():
            node.parent = None
            node.sibling = None
            node.sample = []

        # BackendBound is computed as the remainder of the other three.
        backend = nodes["BackendBound"]
        backend.FrontendBound = nodes["FrontendBound"]
        backend.BadSpeculation = nodes["BadSpeculation"]
        backend.Retiring = nodes["Retiring"]

        # user visible metrics
        for metric in (Metric_IPC(), Metric_UPI(), Metric_TurboUtilization()):
            r.metric(metric)
| 5,235 | Python | .py | 154 | 27.058442 | 143 | 0.620574 | andikleen/pmu-tools | 1,984 | 331 | 178 | GPL-2.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |