content
stringlengths 7
1.05M
| fixed_cases
stringlengths 1
1.28M
|
|---|---|
# Deduplicate chat lines and write them, stripped of the trailing newline
# and of their first/last characters (presumably surrounding quotes --
# TODO confirm), to question_dataset.txt.
# Bug fix: the input file handle was opened without ever being closed;
# use a context manager so it is released deterministically.
with open('youtube_chat.txt', 'r') as infile:
    questions = infile.readlines()
with open('question_dataset.txt', 'w+') as file:
    for s in set(questions):
        print(s.rstrip()[1:-1], file=file)
|
# Deduplicate chat lines and write them, stripped of the trailing newline
# and of their first/last characters (presumably surrounding quotes --
# TODO confirm), to question_dataset.txt.
# Bug fix: the input file handle was opened without ever being closed;
# use a context manager so it is released deterministically.
with open('youtube_chat.txt', 'r') as infile:
    questions = infile.readlines()
with open('question_dataset.txt', 'w+') as file:
    for s in set(questions):
        print(s.rstrip()[1:-1], file=file)
|
# Example literals: a string, a one-entry dict, and a one-element list.
A = 'avalue'
B = {'key': 'value'}
C = ['array']
|
# Lower-case counterparts of the example literals.
a = 'avalue'
b = dict(key='value')
c = ['array']
|
# Display names of the supported package ecosystems.
CARGO = "Cargo"
COMPOSER = "Composer"
GO = "Go"
MAVEN = "Maven"
NPM = "npm"
NUGET = "NuGet"
# PyPI packages are installed via pip, so both aliases share one value.
PYPI = PIP = "pip"
RUBYGEMS = "RubyGems"
ecosystems = [CARGO, COMPOSER, GO, MAVEN, NPM, NUGET, PYPI, RUBYGEMS]
|
# Display names of the supported package ecosystems (lower-case style).
cargo = 'Cargo'
composer = 'Composer'
go = 'Go'
maven = 'Maven'
npm = 'npm'
nuget = 'NuGet'
# PyPI packages are installed via pip, so both aliases share one value.
pypi = pip = 'pip'
rubygems = 'RubyGems'
# Bug fix: the list previously referenced the old upper-case names
# (CARGO, COMPOSER, ...), which are not defined in this snippet.
ecosystems = [cargo, composer, go, maven, npm, nuget, pypi, rubygems]
|
# CPU: 0.08 s
# Reads a villager count and an event count from stdin.  Each event line
# is "<count> p1 p2 ...".  When villager 1 attends, a new song (numbered
# by song_counter) is taught to every participant; otherwise every song
# known by any participant spreads to all participants.  Finally prints
# every villager who knows all songs.
n_villagers = int(input())
# Maps villager id -> set of song ids that villager knows.
villagers = {key: set() for key in range(1, n_villagers + 1)}
song_counter = 0
for _ in range(int(input())):
    # The first token (participant count) is discarded.
    _, *participants = map(int, input().split())
    if 1 in participants:
        song_counter += 1
        for participant in participants:
            villagers[participant].add(song_counter)
    else:
        # NOTE(review): the innermost loop rebinds `participant`,
        # shadowing the outer loop variable; the iterated set object is
        # fixed at loop entry so behavior is correct, but this is fragile.
        for participant in participants:
            for song in villagers[participant]:
                for participant in participants:
                    villagers[participant].add(song)
for villager, songs in villagers.items():
    if len(songs) == song_counter:
        print(villager)
|
# Reads villager and event counts from stdin; villager 1 teaches a new
# song whenever present, otherwise participants pool their songs.
# Prints every villager who knows all songs.
n_villagers = int(input())
# Maps villager id -> set of known song ids.
villagers = {key: set() for key in range(1, n_villagers + 1)}
song_counter = 0
for _ in range(int(input())):
    # The first token (participant count) is discarded.
    (_, *participants) = map(int, input().split())
    if 1 in participants:
        song_counter += 1
        for participant in participants:
            villagers[participant].add(song_counter)
    else:
        # NOTE(review): the innermost loop rebinds `participant`
        # (shadowing); behavior is correct but fragile.
        for participant in participants:
            for song in villagers[participant]:
                for participant in participants:
                    villagers[participant].add(song)
for (villager, songs) in villagers.items():
    if len(songs) == song_counter:
        print(villager)
|
# Given an n x n array, return the array elements arranged from outermost elements to the middle element, traveling clockwise.
def snail(array):
    """Return the elements of square matrix ``array`` in clockwise spiral
    order, outermost ring first.  The input list of lists is consumed
    (mutated) in the process.
    """
    result = []
    while array:
        # Top row, left to right.
        result += array.pop(0)
        # Right column, top to bottom.
        for row in array:
            result.append(row.pop())
        if array:
            # Bottom row, right to left.
            result += array.pop()[::-1]
            # Left column, bottom to top.
            for row in reversed(array):
                result.append(row.pop(0))
    return result
|
def snail(array):
    """Clockwise spiral traversal of a square matrix, outside-in.
    Mutates and empties the input.
    """
    out = []
    while len(array):
        # Top row.
        out.extend(array.pop(0))
        n = len(array)
        # Right column, top to bottom.
        idx = 0
        while idx < n:
            out.append(array[idx].pop(-1))
            idx += 1
        if n:
            # Bottom row, reversed in place then emitted.
            last = array.pop(-1)
            last.reverse()
            out.extend(last)
            # Left column, bottom to top.
            idx = len(array) - 1
            while idx >= 0:
                out.append(array[idx].pop(0))
                idx -= 1
    return out
|
class NodeRegistry:
    """Registry of Node classes that can describe itself as a dict."""

    def __init__(self):
        # Registered Node subclasses (deduplicated by the set).
        self.nodes = set()

    def register(self, *nodes: Type[Node]):
        """Add one or more Node classes to the registry.

        Annotation fix: each variadic argument is a single Node class,
        not a list of them.
        """
        self.nodes.update(nodes)

    def pipeline_factory(self, pipeline_spec):
        """Construct a pipeline according to the spec.
        """
        ...

    @staticmethod
    def _port_to_tuple(port: Port):
        # (type, help) pair; the port type is not tracked yet, hence None.
        return (
            None,
            inspect.cleandoc(port.help) if port.help else None
        )

    @staticmethod
    def _parse_docstr(obj):
        """Parse obj.__doc__, logging which object failed before re-raising."""
        try:
            return docstring_parser.parse(obj.__doc__)
        except Exception:
            # Bug fix: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not intercepted; the parser error is
            # still propagated to the caller.
            print("Error parsing docstring of {}".format(obj.__name__))
            raise

    @staticmethod
    def _parse_arguments(node_cls: Type[Node]):
        """Map each documented, annotated __init__ argument to an
        (annotation, description) pair."""
        # Use type annotations to determine the type.
        annotations = node_cls.__init__.__annotations__
        # Argument descriptions from the class docstring ...
        arg_desc = {
            p.arg_name: p.description
            for p in NodeRegistry._parse_docstr(node_cls).params
        }
        # ... overridden by the __init__ docstring when both document one.
        arg_desc.update({
            p.arg_name: p.description
            for p in NodeRegistry._parse_docstr(node_cls.__init__).params
        })
        # Keep only arguments that are both annotated and documented.
        return {
            k: (annotations[k], arg_desc[k])
            for k in annotations.keys() & arg_desc.keys()
        }

    @classmethod
    def _node_to_dict(cls, node_cls: Type[Node]):
        """Describe a Node class (docs, ports, options) as a plain dict."""
        doc = cls._parse_docstr(node_cls)
        return {
            "name": node_cls.__name__,
            "short_description": doc.short_description,
            "long_description": doc.long_description,
            "inputs": {p.name: cls._port_to_tuple(p) for p in getattr(node_cls, "inputs", [])},
            "outputs": {p.name: cls._port_to_tuple(p) for p in getattr(node_cls, "outputs", [])},
            "options": cls._parse_arguments(node_cls),
        }

    def to_dict(self) -> dict:
        """Serialize every registered node, keyed by class name."""
        return {
            n.__name__: self._node_to_dict(n) for n in self.nodes
        }
|
class Noderegistry:
    """Registry of Node classes that can describe itself as a dict."""

    def __init__(self):
        self.nodes = set()

    def register(self, *nodes: Type[Node]):
        """Add one or more Node classes to the registry (annotation fix:
        each variadic argument is a single class, not a list)."""
        self.nodes.update(nodes)

    def pipeline_factory(self, pipeline_spec):
        """Construct a pipeline according to the spec.
        """
        ...

    @staticmethod
    def _port_to_tuple(port: Port):
        # (type, help) pair; the port type is not tracked yet, hence None.
        return (None, inspect.cleandoc(port.help) if port.help else None)

    @staticmethod
    def _parse_docstr(obj):
        """Parse obj.__doc__, logging which object failed before re-raising."""
        try:
            return docstring_parser.parse(obj.__doc__)
        except Exception:
            # Narrowed from a bare `except:`; error still propagates.
            print('Error parsing docstring of {}'.format(obj.__name__))
            raise

    @staticmethod
    def _parse_arguments(node_cls: Type[Node]):
        """Map each documented, annotated __init__ argument to an
        (annotation, description) pair."""
        annotations = node_cls.__init__.__annotations__
        # Bug fix: these lookups referenced the pre-rename class name
        # `NodeRegistry`, a NameError when this class stands alone.
        arg_desc = {p.arg_name: p.description for p in Noderegistry._parse_docstr(node_cls).params}
        arg_desc.update({p.arg_name: p.description for p in Noderegistry._parse_docstr(node_cls.__init__).params})
        return {k: (annotations[k], arg_desc[k]) for k in annotations.keys() & arg_desc.keys()}

    @classmethod
    def _node_to_dict(cls, node_cls: Type[Node]):
        """Describe a Node class (docs, ports, options) as a plain dict."""
        doc = cls._parse_docstr(node_cls)
        return {'name': node_cls.__name__, 'short_description': doc.short_description, 'long_description': doc.long_description, 'inputs': {p.name: cls._port_to_tuple(p) for p in getattr(node_cls, 'inputs', [])}, 'outputs': {p.name: cls._port_to_tuple(p) for p in getattr(node_cls, 'outputs', [])}, 'options': cls._parse_arguments(node_cls)}

    def to_dict(self) -> dict:
        """Serialize every registered node, keyed by class name."""
        return {n.__name__: self._node_to_dict(n) for n in self.nodes}
|
# Placeholder MySQL connection settings; ##...## tokens are substituted
# at deploy time.
db_config = dict(
    user='##username##',
    passwd='##password##',
    host='##host##',
    db='employees',
)
|
# Placeholder MySQL connection settings; ##...## tokens are substituted
# at deploy time.
db_config = {
    'user': '##username##',
    'passwd': '##password##',
    'host': '##host##',
    'db': 'employees',
}
|
class Solution(object):
    def reachNumber(self, target):
        """Minimum number of moves to reach `target` on the number line,
        where move i covers i steps in either direction (LeetCode 754).

        :type target: int
        :rtype: int
        """
        target = abs(target)  # symmetric: -t costs the same as t
        # Smallest n whose triangular sum could possibly reach target.
        n = int((2 * target) ** 0.5)
        steps = n * (n + 1) // 2
        # Keep adding moves until the overshoot is even: an even surplus
        # can be absorbed by flipping the sign of one earlier move.
        while steps < target or (steps - target) % 2 != 0:
            n += 1
            steps += n
        return n
|
class Solution(object):
    def reach_number(self, target):
        """Minimum number of moves to reach `target`, move i covering i
        steps either way (LeetCode 754).

        :type target: int
        :rtype: int
        """
        goal = abs(target)  # the line is symmetric about zero
        moves = int((goal * 2) ** 0.5)
        total = moves * (moves + 1) // 2
        # Advance until we are at or past the goal with an even surplus;
        # an even surplus is fixed by negating one earlier move.
        while not (total >= goal and (total - goal) % 2 == 0):
            moves += 1
            total += moves
        return moves
|
# Decode the obfuscated message: character i was shifted down by i % 19,
# so shift each character back up, echoing progress as we go.
a = "J`e^\x1cf_l]_WiUa\x12UQ]\x0esdj^hp\x1a\\mZ_\x15hT`XQ]\x0eumrrg\x1bg^fZ[\\U[\x12aU]gea_o]i\x1a<gm_Y!$+\x11iPOh-\x1e?pr\x1am``i\x15]f\x12j_d` ej^c\x1b4\x19;K<GoV&c#Ng0tp\\o.f_W+dYS'^h$ha_bs`-Zn-f^*cq!\x12=_eS sml\x1cn_^\x18`j\x15Vhf\x11]PYe\x1fwlqm\x1afaeZ\x15[_ah\x10d^"
b = ""
for i, ch in enumerate(a):
    print(ch)
    b += chr(ord(ch) + i % 19)
print(b)
|
# Decode the obfuscated message by undoing the per-index shift of i % 19.
a = "J`e^\x1cf_l]_WiUa\x12UQ]\x0esdj^hp\x1a\\mZ_\x15hT`XQ]\x0eumrrg\x1bg^fZ[\\U[\x12aU]gea_o]i\x1a<gm_Y!$+\x11iPOh-\x1e?pr\x1am``i\x15]f\x12j_d` ej^c\x1b4\x19;K<GoV&c#Ng0tp\\o.f_W+dYS'^h$ha_bs`-Zn-f^*cq!\x12=_eS sml\x1cn_^\x18`j\x15Vhf\x11]PYe\x1fwlqm\x1afaeZ\x15[_ah\x10d^"
b = ''
i = 0
while i < len(a):
    print(a[i])
    b += chr(ord(a[i]) + i % 19)
    i += 1
print(b)
|
# Read a factor and a count from stdin, then print the first `count`
# multiples of the factor as a list.
factor = int(input())
count = int(input())
# Bug fix: renamed from `list`, which shadowed the builtin type.
multiples = []
counter = factor
for _ in range(count):
    multiples.append(counter)
    counter += factor
print(multiples)
|
# Read a factor and a count from stdin, then print the first `count`
# multiples of the factor as a list.
factor = int(input())
count = int(input())
# Bug fix: renamed from `list`, which shadowed the builtin type.
multiples = []
counter = factor
for _ in range(count):
    multiples.append(counter)
    counter += factor
print(multiples)
|
# Definition for a binary tree node.
class _TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def isSameTree(self, p: _TreeNode, q: _TreeNode) -> bool:
        """Return True iff trees p and q are structurally identical with
        equal node values (LeetCode 100)."""
        # Two empty trees match; exactly one empty tree does not.
        if p is None:
            return q is None
        if q is None:
            return False
        # The very same object is trivially the same tree.
        if p is q:
            return True
        # Compare roots, then recurse depth-first into both children.
        if p.val != q.val:
            return False
        return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
|
class _Treenode:
    """Binary tree node."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    # Annotation fix: the parameters referenced `_TreeNode`, which does
    # not exist after the rename to `_Treenode`; strings keep them lazy.
    def is_same_tree(self, p: '_Treenode', q: '_Treenode') -> bool:
        """Return True iff trees p and q are structurally identical with
        equal node values (LeetCode 100)."""
        if p is None and q is None:
            return True
        if p is None or q is None:
            return False
        if id(p) == id(q):
            return True
        # Bug fix: the recursion previously called the removed camelCase
        # name `isSameTree`, raising AttributeError at runtime.
        return p.val == q.val and self.is_same_tree(p.left, q.left) and self.is_same_tree(p.right, q.right)
|
"""Top-level package for DRF Compose."""
# Package metadata consumed by packaging/setup tooling.
__author__ = """Sotunde Abiodun"""
__email__ = "sotundeabiodun00@gmail.com"
__version__ = "0.1.1"
|
"""Top-level package for DRF Compose."""
# Package metadata consumed by packaging/setup tooling.
__author__ = 'Sotunde Abiodun'
__email__ = 'sotundeabiodun00@gmail.com'
__version__ = '0.1.1'
|
# Based on https://github.com/zricethezav/gitleaks/blob/6f5ad9dc0b385c872f652324188ce91da7157c7c/test_data/test_repos/test_dir_1/server.test2.py
# Do not hard code credentials
# SECURITY: these look like deliberately fake credentials for leak-scanner
# test data.  Real code must load credentials from the environment or an
# AWS credentials provider, never from source control.
client = boto3.client(
    's3',
    # Hard coded strings as credentials, not recommended.
    aws_access_key_id='AKIAIO5FODNN7EXAMPLE',
    aws_secret_access_key='ABCDEF+c2L7yXeGvUyrPgYsDnWRRC1AYEXAMPLE'
)
# gh_pat = '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'
|
# SECURITY: hard-coded example AWS credentials (leak-scanner test data);
# real code must use the environment or an AWS credentials provider.
client = boto3.client('s3', aws_access_key_id='AKIAIO5FODNN7EXAMPLE', aws_secret_access_key='ABCDEF+c2L7yXeGvUyrPgYsDnWRRC1AYEXAMPLE')
|
"""
Bisect Squares.
Given two squares on a two-dimensional plane, find
a line that would cut these two squares in half. Assume
that the top and the bottom sides of the square run
parallel to the x-axis.
"""
class BisectSquares():
    class Square():
        """Axis-aligned square given by its four edge coordinates."""

        class Line():
            """Line segment between two points."""

            def __init__(self, p1, p2):
                self.p1 = p1
                self.p2 = p2

            def __repr__(self):
                # Bug fix: __repr__ previously returned a tuple (repr()
                # requires a str and would raise TypeError) and referenced
                # p1 twice instead of p1 and p2.
                return f'Line({self.p1}, {self.p2})'

            def __str__(self):
                return f'[{self.p1}, {self.p2}]'

        class Point():
            """2-D point."""

            def __init__(self, x, y):
                self.x = x
                self.y = y

            def __str__(self):
                return f'({self.x}, {self.y})'

        def __init__(self, left, top, bottom, right):
            self.top = top
            self.left = left
            self.bottom = bottom
            self.right = right

        def size(self) -> float:
            """Area of the square (annotation fixed: returns a number,
            not a Point)."""
            return (self.right - self.left) * (self.top - self.bottom)

        def mid(self) -> Point:
            """Center point of the square."""
            return self.Point((self.left + self.right) / 2.0,
                              (self.top + self.bottom) / 2.0)

        def extend(self, mid1, mid2, size):
            """Point size/2 beyond mid1 along the line mid1->mid2, on the
            side of mid1 facing away from mid2 (negative size flips it)."""
            xdir = -1 if mid1.x < mid2.x else 1
            ydir = -1 if mid1.y < mid2.y else 1
            if mid1.x == mid2.x:
                # Vertical line: extend straight up/down.
                return self.Point(mid1.x, mid1.y + ydir * size / 2.0)
            slope = (mid1.y - mid2.y) / (mid1.x - mid2.x)
            # Cleanup: removed the unused `y2` initialisation; every
            # branch below assigns both x1 and y1.
            if abs(slope) == 1:
                x1 = mid1.x + xdir * size / 2.0
                y1 = mid1.y + ydir * size / 2.0
            elif abs(slope) < 1:
                # Shallow line: step in x, derive y from the slope.
                x1 = mid1.x + xdir * size / 2.0
                y1 = slope * (x1 - mid1.x) + mid1.y
            else:
                # Steep line: step in y, derive x from the slope.
                y1 = mid1.y + ydir * size / 2.0
                x1 = (y1 - mid1.y) / slope + mid1.x
            return self.Point(x1, y1)

        def cut(self, other) -> Line:
            """Line through both squares' midpoints, extended past both
            squares, so it bisects each of them."""
            p1 = self.extend(self.mid(), other.mid(), self.size())
            p2 = self.extend(self.mid(), other.mid(), -1 * self.size())
            p3 = self.extend(other.mid(), self.mid(), other.size())
            p4 = self.extend(other.mid(), self.mid(), -1 * other.size())
            # Pick the extreme endpoints: leftmost/bottom-most as start,
            # rightmost/top-most as end.
            start, end = p1, p1
            for point in [p2, p3, p4]:
                if point.x < start.x or \
                        point.x == start.x and point.y < start.y:
                    start = point
                elif point.x > end.x or \
                        point.x == end.x and point.y > end.y:
                    end = point
            return self.Line(start, end)
|
"""
Bisect Squares.
Given two squares on a two-dimensional plane, find
a line that would cut these two squares in half. Assume
that the top and the bottom sides of the square run
parallel to the x-axis.
"""
class Bisectsquares:
    class Square:
        """Axis-aligned square given by its four edge coordinates."""

        class Line:
            """Line segment between two points."""

            def __init__(self, p1, p2):
                self.p1 = p1
                self.p2 = p2

            def __repr__(self):
                # Bug fix: __repr__ returned a tuple (TypeError when
                # repr() is called) and used p1 twice instead of p1, p2.
                return f'Line({self.p1}, {self.p2})'

            def __str__(self):
                return f'[{self.p1}, {self.p2}]'

        class Point:
            """2-D point."""

            def __init__(self, x, y):
                self.x = x
                self.y = y

            def __str__(self):
                return f'({self.x}, {self.y})'

        def __init__(self, left, top, bottom, right):
            self.top = top
            self.left = left
            self.bottom = bottom
            self.right = right

        def size(self) -> float:
            """Area of the square (annotation fixed: returns a number)."""
            return (self.right - self.left) * (self.top - self.bottom)

        def mid(self) -> Point:
            """Center point of the square."""
            return self.Point((self.left + self.right) / 2.0, (self.top + self.bottom) / 2.0)

        def extend(self, mid1, mid2, size):
            """Point size/2 beyond mid1 along the line mid1->mid2, away
            from mid2 (negative size flips direction)."""
            xdir = -1 if mid1.x < mid2.x else 1
            ydir = -1 if mid1.y < mid2.y else 1
            if mid1.x == mid2.x:
                # Vertical line: extend straight up/down.
                return self.Point(mid1.x, mid1.y + ydir * size / 2.0)
            slope = (mid1.y - mid2.y) / (mid1.x - mid2.x)
            # Cleanup: dropped the unused (y1, y2) pre-initialisation;
            # every branch assigns both coordinates.
            if abs(slope) == 1:
                x1 = mid1.x + xdir * size / 2.0
                y1 = mid1.y + ydir * size / 2.0
            elif abs(slope) < 1:
                # Shallow line: step in x, derive y.
                x1 = mid1.x + xdir * size / 2.0
                y1 = slope * (x1 - mid1.x) + mid1.y
            else:
                # Steep line: step in y, derive x.
                y1 = mid1.y + ydir * size / 2.0
                x1 = (y1 - mid1.y) / slope + mid1.x
            return self.Point(x1, y1)

        def cut(self, other) -> Line:
            """Line through both midpoints, extended past both squares."""
            p1 = self.extend(self.mid(), other.mid(), self.size())
            p2 = self.extend(self.mid(), other.mid(), -1 * self.size())
            p3 = self.extend(other.mid(), self.mid(), other.size())
            p4 = self.extend(other.mid(), self.mid(), -1 * other.size())
            # Extreme endpoints: leftmost/bottom-most start, rightmost/
            # top-most end.
            (start, end) = (p1, p1)
            for point in [p2, p3, p4]:
                if point.x < start.x or (point.x == start.x and point.y < start.y):
                    start = point
                elif point.x > end.x or (point.x == end.x and point.y > end.y):
                    end = point
            return self.Line(start, end)
|
def find_range_values(curr_range):
    """Parse a 'start,end' string into a list of two ints."""
    return [int(part) for part in curr_range.split(",")]
def find_set(curr_range):
    """Return the set of integers covered by a 'start,end' range string
    (inclusive on both ends)."""
    lo, hi = find_range_values(curr_range)
    return set(range(lo, hi + 1))
def find_longest_intersection(n):
    """Read n stdin lines of the form 'a,b-c,d' (two inclusive integer
    ranges) and return the largest pairwise intersection as a list."""
    longest_intersection = set()
    for _ in range(n):
        # Each line carries two comma-ranges separated by '-'.
        first_range, second_range = input().split("-")
        first_set = find_set(first_range)
        second_set = find_set(second_range)
        curr_intersection = first_set.intersection(second_set)
        # Keep the biggest intersection seen so far (ties keep the first).
        if len(curr_intersection) > len(longest_intersection):
            longest_intersection = curr_intersection
    return list(longest_intersection)
def print_result(longest_intersection):
    """Pretty-print the winning intersection and its length."""
    print(f"Longest intersection is {longest_intersection} "
          f"with length {len(longest_intersection)}")

# Entry point: the first stdin line is the number of range pairs.
print_result(find_longest_intersection(int(input())))
|
def find_range_values(curr_range):
    """Split a 'start,end' string and convert both parts to int."""
    start_text, end_text = curr_range.split(',')
    return [int(start_text), int(end_text)]
def find_set(curr_range):
    """Set of all integers in the inclusive range encoded by a
    'start,end' string."""
    bounds = find_range_values(curr_range)
    return set(range(bounds[0], bounds[1] + 1))
def find_longest_intersection(n):
    """Read n stdin lines of 'a,b-c,d' range pairs and return the largest
    pairwise intersection as a list."""
    longest_intersection = set()
    for _ in range(n):
        # Two comma-ranges separated by '-'.
        (first_range, second_range) = input().split('-')
        first_set = find_set(first_range)
        second_set = find_set(second_range)
        curr_intersection = first_set.intersection(second_set)
        # Keep the biggest intersection seen so far.
        if len(curr_intersection) > len(longest_intersection):
            longest_intersection = curr_intersection
    return list(longest_intersection)
def print_result(longest_intersection):
    """Pretty-print the winning intersection and its length."""
    print(f'Longest intersection is {longest_intersection} with length {len(longest_intersection)}')

# Entry point: the first stdin line is the number of range pairs.
print_result(find_longest_intersection(int(input())))
|
# Complete solution
# https://leetcode.com/problems/remove-sub-folders-from-the-filesystem/discuss/409028/JavaPython-3-3-methods-from-O(n-*-(logn-%2B-m-2))-to-O(n-*-m)-w-brief-explanation-and-analysis.
# use startswith
class Solution:
    def removeSubfolders(self, folder: List[str]) -> List[str]:
        """Drop every folder that is a sub-folder of another entry
        (LeetCode 1233).

        Sorting in place puts each parent immediately before its
        children, so one pass tracking the current parent prefix
        suffices.  The input list is sorted as a side effect.
        """
        folder.sort()
        kept = []
        prefix = ' '  # impossible path prefix, so the first entry is kept
        for path in folder:
            if not path.startswith(prefix):
                kept.append(path)
                prefix = path + '/'
        return kept
# Time: O(NlogN)
# Space:O(1), not count the output, if count, then O(N)
# trie
class Solution:
    def removeSubfolders(self, folder: List[str]) -> List[str]:
        """Drop sub-folders using a trie of path components; a stored '$'
        marks the end of a kept folder, so any path that reaches one is
        itself a sub-folder (LeetCode 1233)."""
        make_node = lambda: defaultdict(make_node)
        root = make_node()
        kept = []
        for path in sorted(folder):
            node = root
            for part in path[1:].split('/'):
                node = node[part]
                if '$' in node:
                    break  # an ancestor was already kept
            else:
                node['$'] = True
                kept.append(path)
        return kept
# Time: O(NM)
# Space:O(1)
|
class Solution:
    def remove_subfolders(self, folder: List[str]) -> List[str]:
        """Drop every folder that is a sub-folder of another entry
        (LeetCode 1233).

        After an in-place sort each parent directly precedes its
        children; keep a folder only when it does not start with the
        last kept folder's prefix.
        """
        folder.sort()
        kept = []
        prefix = ' '  # matches no real path, so the first folder is kept
        for path in folder:
            if not path.startswith(prefix):
                kept.append(path)
                prefix = path + '/'
        return kept
class Solution:
    def remove_subfolders(self, folder: List[str]) -> List[str]:
        """Drop sub-folders using a trie of path components; a stored '$'
        marks a kept folder (LeetCode 1233)."""
        # Bug fix: the lambda referred to the undefined old name `Node`;
        # it must self-reference `node` so nested defaultdicts are created.
        node = lambda: defaultdict(node)
        trie = node()
        ans = []
        for path in sorted(folder):
            n = trie
            for c in path[1:].split('/'):
                n = n[c]
                if '$' in n:
                    break  # an ancestor folder was already kept
            else:
                n['$'] = True
                ans.append(path)
        return ans
|
# WPRDC CKAN datastore SQL-search endpoint, assembled from its parts.
CKAN_ROOT = "https://data.wprdc.org/"
API_PATH = "api/3/action/"
SQL_SEARCH_ENDPOINT = "datastore_search_sql"
API_URL = f"{CKAN_ROOT}{API_PATH}{SQL_SEARCH_ENDPOINT}"
|
# WPRDC CKAN datastore SQL-search endpoint, assembled from its parts.
ckan_root = 'https://data.wprdc.org/'
api_path = 'api/3/action/'
sql_search_endpoint = 'datastore_search_sql'
# Bug fix: the concatenation previously used the old upper-case names
# (CKAN_ROOT, ...), which are not defined in this snippet.
api_url = ckan_root + api_path + sql_search_endpoint
|
"""Role testing files using testinfra"""


def test_daemon_config(host):
    """Check docker daemon config"""
    # daemon.json must exist and be owned by root:root.
    f = host.file("/etc/docker/daemon.json")
    assert f.is_file
    assert f.user == "root"
    assert f.group == "root"
    # Expected JSON fragment: live-restore plus the local log driver with
    # 100 MB rotation.
    config = (
        "{\n"
        " \"live-restore\": true,\n"
        " \"log-driver\": \"local\",\n"
        " \"log-opts\": {\n"
        " \"max-size\": \"100m\"\n"
        " }\n"
        "}"
    )
    assert config in f.content_string


def test_cron_job(host):
    """Check cron job"""
    # Root's crontab must schedule a full docker prune.
    cmd = "docker system prune --all --volumes --force"
    f = host.file("/var/spool/cron/crontabs/root").content_string
    assert cmd in f


def test_docker_service(host):
    """Check docker service"""
    # Docker must be running now and enabled at boot.
    s = host.service("docker")
    assert s.is_running
    assert s.is_enabled
|
"""Role testing files using testinfra"""


def test_daemon_config(host):
    """Check docker daemon config"""
    # daemon.json must exist and be owned by root:root.
    f = host.file('/etc/docker/daemon.json')
    assert f.is_file
    assert f.user == 'root'
    assert f.group == 'root'
    # Expected JSON fragment: live-restore plus the local log driver with
    # 100 MB rotation.
    config = '{\n "live-restore": true,\n "log-driver": "local",\n "log-opts": {\n "max-size": "100m"\n }\n}'
    assert config in f.content_string


def test_cron_job(host):
    """Check cron job"""
    # Root's crontab must schedule a full docker prune.
    cmd = 'docker system prune --all --volumes --force'
    f = host.file('/var/spool/cron/crontabs/root').content_string
    assert cmd in f


def test_docker_service(host):
    """Check docker service"""
    # Docker must be running now and enabled at boot.
    s = host.service('docker')
    assert s.is_running
    assert s.is_enabled
|
# variables 3
# Demonstrates dynamic typing: rebinding `a` changes its runtime type.
a = "abc"
print("a:", a, type(a))
a = 3
print("a:", a, type(a))
|
# Demonstrates dynamic typing: rebinding `a` changes its runtime type.
a = 'abc'
print('a:', a, type(a))
a = 3
print('a:', a, type(a))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using Greedy Algorithm to solve balloon burst problem.
'''
class Solution(object):
    def findMinArrowShots(self, points):
        """Minimum arrows to burst all balloons (LeetCode 452), greedy on
        interval end points.

        :type points: List[List[int]]
        :rtype: int
        """
        if not points:
            return 0
        # Shoot at the end of the earliest-ending balloon; every balloon
        # starting at or before that point is burst by the same arrow.
        ordered = sorted(points, key=lambda interval: interval[1])
        arrows = 1
        reach = ordered[0][1]
        for interval in ordered:
            if interval[0] > reach:
                arrows += 1
                reach = interval[1]
        return arrows
|
"""
Copyright 2020, Yutong Xie, UIUC.
Using Greedy Algorithm to solve balloon burst problem.
"""
class Solution(object):
    def find_min_arrow_shots(self, points):
        """Minimum arrows to burst all balloons (LeetCode 452): sort by
        right edge, fire at each earliest uncovered end.

        :type points: List[List[int]]
        :rtype: int
        """
        if not points:
            return 0
        by_end = sorted(points, key=lambda balloon: balloon[1])
        count, end = 1, by_end[0][1]
        for balloon in by_end:
            # A balloon starting past the current arrow needs a new one.
            if balloon[0] > end:
                count += 1
                end = balloon[1]
        return count
|
def put_languages(self, root):
    """Store self.languages on root.attrib['languages'] as
    'a/b,c/d,...' (inner tuples joined by '/', entries by ',').
    Skipped when the attribute is missing or empty."""
    if getattr(self, "languages", None):
        root.attrib["languages"] = ",".join("/".join(pair) for pair in self.languages)
def put_address(self, root):
    """Store self.address on root.attrib['address']: a string verbatim,
    any other iterable joined with '|'.  Falsy addresses are skipped."""
    if not self.address:
        return
    if isinstance(self.address, str):
        root.attrib["address"] = self.address
    else:
        root.attrib["address"] = "|".join(self.address)
|
def put_languages(self, root):
    """Write self.languages to root.attrib['languages'] in
    'a/b,c/d' form; no-op when absent or empty."""
    if hasattr(self, 'languages') and self.languages:
        parts = []
        for pair in self.languages:
            parts.append('/'.join(pair))
        root.attrib['languages'] = ','.join(parts)
def put_address(self, root):
    """Write self.address to root.attrib['address']; joins non-string
    addresses with '|'.  Falsy addresses are skipped."""
    if self.address:
        value = self.address if isinstance(self.address, str) else '|'.join(self.address)
        root.attrib['address'] = value
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""RackTablesDB - a python library to access the racktables database.
"""
# Package metadata consumed by packaging tooling.
__author__ = "John van Zantvoort"
__email__ = "john.van.zantvoort@snow.nl"
__license__ = "The MIT License (MIT)"
__version__ = "1.0.1"
|
"""RackTablesDB - a python library to access the racktables database.
"""
# Package metadata consumed by packaging tooling.
__author__ = 'John van Zantvoort'
__email__ = 'john.van.zantvoort@snow.nl'
__license__ = 'The MIT License (MIT)'
__version__ = '1.0.1'
|
# n = nums.length
# time = 0(n)
# space = O(1)
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Largest sum of any contiguous subarray (Kadane's algorithm,
        O(n) time, O(1) extra space). LeetCode 53."""
        best = max(nums)
        running = 0
        for value in nums:
            # Restart the window whenever the running sum goes negative.
            running = value if running < 0 else running + value
            best = max(best, running)
        return best
|
class Solution:
    def max_sub_array(self, nums: List[int]) -> int:
        """Maximum contiguous-subarray sum via Kadane's algorithm
        (LeetCode 53); O(n) time, O(1) space."""
        answer = max(nums)
        window = 0
        for current in nums:
            # A negative running total only hurts; clamp it to zero
            # before adding the next element.
            window = max(0, window) + current
            answer = max(answer, window)
        return answer
|
class Heroes3(object):
    """Army-size descriptions from Heroes of Might and Magic 3."""

    def __init__(self):
        super(Heroes3, self).__init__()
        # Description -> inclusive (min, max) head-count range.
        self._army_size = {
            "Few": (1, 4),
            "Several": (5, 9),
            "Pack": (10, 19),
            "Lots": (20, 49),
            "Horde": (50, 100),
            "Throng": (100, 249),
            "Swarm": (250, 499),
            "Zounds": (500, 999),
            "Legion": (1000, float("inf")),
        }

    def get_all(self):
        """Return the full description -> range mapping."""
        return self._army_size

    def get_army_description(self, size):
        """Return the first description whose range contains `size`,
        or 'Unknown' when none matches."""
        for label, (low, high) in self._army_size.items():
            if low <= size <= high:
                return label
        return "Unknown"
def main():
    """Script entry point."""
    print("Hello, Heroes of Might and Magic 3!")

if __name__ == "__main__":
    main()
|
class Heroes3(object):
    """Army-size descriptions from Heroes of Might and Magic 3."""

    def __init__(self):
        super(Heroes3, self).__init__()
        # Description -> inclusive (min, max) head-count range.
        ranges = [
            ('Few', 1, 4), ('Several', 5, 9), ('Pack', 10, 19),
            ('Lots', 20, 49), ('Horde', 50, 100), ('Throng', 100, 249),
            ('Swarm', 250, 499), ('Zounds', 500, 999),
            ('Legion', 1000, float('inf')),
        ]
        self._army_size = {name: (lo, hi) for name, lo, hi in ranges}

    def get_all(self):
        """Return the full description -> range mapping."""
        return self._army_size

    def get_army_description(self, size):
        """First description whose range contains `size`, else 'Unknown'."""
        for key in self._army_size:
            minimum, maximum = self._army_size[key]
            if minimum <= size <= maximum:
                return key
        return 'Unknown'
def main():
    """Script entry point."""
    print('Hello, Heroes of Might and Magic 3!')

if __name__ == '__main__':
    main()
|
"""
572
subtree of another tree
easy
Given the roots of two binary trees root and subRoot, return true if
there is a subtree of root with the same structure and node values of
subRoot and false otherwise.
A subtree of a binary tree tree is a tree that consists of a node in
tree and all of this node's descendants. The tree tree could also be
considered as a subtree of itself.
"""
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def isSubtree(self, s: TreeNode, t: TreeNode) -> bool:
        """Return True iff t occurs as a subtree of s (LeetCode 572).

        Walks s with an explicit stack and compares each visited node
        against t recursively.
        """
        def compare(a, b):
            # Both empty -> match; exactly one empty -> mismatch;
            # otherwise roots must agree and both children recurse.
            if a is None or b is None:
                return a is None and b is None
            return a.val == b.val and compare(a.left, b.left) and compare(a.right, b.right)

        stack = [s]
        while stack:
            node = stack.pop()
            if compare(node, t):
                return True
            if node.left is not None:
                stack.append(node.left)
            if node.right is not None:
                stack.append(node.right)
        return False
|
"""
572
subtree of another tree
easy
Given the roots of two binary trees root and subRoot, return true if
there is a subtree of root with the same structure and node values of
subRoot and false otherwise.
A subtree of a binary tree tree is a tree that consists of a node in
tree and all of this node's descendants. The tree tree could also be
considered as a subtree of itself.
"""
class Treenode:
    """Binary tree node."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    # Annotation fix: the parameters referenced `TreeNode`, not the
    # renamed `Treenode` defined above; string annotations keep them lazy
    # and consistent with this snippet.
    def is_subtree(self, s: 'Treenode', t: 'Treenode') -> bool:
        """Return True iff t occurs as a subtree of s (LeetCode 572)."""
        def compare(n1, n2):
            # Both empty -> match; exactly one empty -> mismatch.
            if n1 is None and n2 is None:
                return True
            elif n1 is None or n2 is None:
                return False
            else:
                if n1.val != n2.val:
                    return False
                return compare(n1.left, n2.left) and compare(n1.right, n2.right)

        # DFS over s, trying each visited node as a candidate root for t.
        stack = [s]
        while stack:
            current = stack.pop()
            if compare(current, t):
                return True
            if current.left is not None:
                stack.append(current.left)
            if current.right is not None:
                stack.append(current.right)
        return False
|
# For each of `count` test cases: read k (iteration count) and n (list
# length), repeatedly take in-place prefix sums of the list 1..n, and
# print the final element.
count = int(input())
for i in range(count):
    k = int(input())
    n = int(input())
    people = [j for j in range(1, n + 1)]
    for x in range(k):
        # In-place prefix sums: people[v+1] becomes the sum of the
        # first v+2 entries.
        for v in range(n - 1):
            people[v + 1] += people[v]
    print(people[-1])
|
# For each of `count` test cases: read k (iteration count) and n (list
# length), repeatedly take in-place prefix sums of 1..n, and print the
# final element.
count = int(input())
for i in range(count):
    k = int(input())
    n = int(input())
    people = [j for j in range(1, n + 1)]
    for x in range(k):
        # In-place prefix sums over the list.
        for v in range(n - 1):
            people[v + 1] += people[v]
    print(people[-1])
|
class Solution:
    def canPlaceFlowers(self, flowerbed, n):
        """Return True when `n` new flowers fit in `flowerbed`
        (0 = empty, 1 = planted) without adjacent flowers (LeetCode 605).
        Greedily plants into `flowerbed`, mutating it.

        :type flowerbed: List[int]
        :type n: int
        :rtype: bool
        """
        remaining = n
        # A bed of length <= 1 fits at most one flower.
        if len(flowerbed) <= 1:
            return remaining == 0 or (remaining == 1 and flowerbed == [0])
        # The two ends only have a single neighbour each.
        if flowerbed[0] == 0 and flowerbed[1] == 0:
            flowerbed[0] = 1
            remaining -= 1
        if flowerbed[-1] == 0 and flowerbed[-2] == 0:
            flowerbed[-1] = 1
            remaining -= 1
        # Interior cells need both neighbours free.
        for i in range(1, len(flowerbed) - 2):
            if flowerbed[i - 1] != 1 and flowerbed[i] != 1 and flowerbed[i + 1] != 1:
                flowerbed[i] = 1
                remaining -= 1
        return remaining <= 0
|
class Solution:
    def can_place_flowers(self, flowerbed, n):
        """Return True when `n` new flowers fit in `flowerbed`
        (0 = empty, 1 = planted) with no two flowers adjacent
        (LeetCode 605).  Plants greedily, mutating `flowerbed`.

        :type flowerbed: List[int]
        :type n: int
        :rtype: bool
        """
        left = n
        if len(flowerbed) <= 1:
            # At most one flower fits in a 0/1-cell bed.
            if left == 0 or (left == 1 and flowerbed == [0]):
                return True
            return False
        # End cells have only one neighbour to respect.
        if flowerbed[0] == 0 == flowerbed[1]:
            flowerbed[0] = 1
            left -= 1
        if flowerbed[-1] == 0 == flowerbed[-2]:
            flowerbed[-1] = 1
            left -= 1
        # Interior cells need both neighbours free.
        i = 1
        while i < len(flowerbed) - 2:
            if flowerbed[i] != 1 and flowerbed[i + 1] != 1 and flowerbed[i - 1] != 1:
                flowerbed[i] = 1
                left -= 1
            i += 1
        return left <= 0
|
class Solution:
    def removeDuplicates(self, nums):
        """Remove duplicates in place from sorted `nums` so each value
        appears at most twice; return the new logical length
        (LeetCode 80).

        :type nums: List[int]
        :rtype: int
        """
        if len(nums) <= 2:
            return len(nums)
        write = 2
        for read in range(2, len(nums)):
            # Keep nums[read] unless the last two kept values already
            # equal it (a third consecutive copy).
            if nums[write - 1] != nums[read] or nums[write - 2] != nums[read]:
                nums[write] = nums[read]
                write += 1
        return write

if __name__ == "__main__":
    print(Solution().removeDuplicates([0, 0, 1, 1, 1, 1, 2, 3, 3]))
|
class Solution:
    def remove_duplicates(self, nums):
        """Remove duplicates in place from sorted `nums` so each value
        appears at most twice; return the new logical length
        (LeetCode 80).

        :type nums: List[int]
        :rtype: int
        """
        if len(nums) <= 2:
            return len(nums)
        (i, j) = (2, 2)  # i reads, j writes
        while i < len(nums):
            # Skip nums[i] when it would be a third consecutive copy.
            if not nums[j - 1] == nums[j - 2] == nums[i]:
                nums[j] = nums[i]
                j += 1
            i += 1
        return j

if __name__ == '__main__':
    # Bug fix: previously called undefined `solution()` (wrong case) and
    # the old method name `removeDuplicates` after the rename.
    print(Solution().remove_duplicates([0, 0, 1, 1, 1, 1, 2, 3, 3]))
|
"""Project exceptions"""


class ProjectImportError(Exception):
    """Failure to import a project from a repository."""
|
"""Project exceptions"""


class Projectimporterror(Exception):
    """Failure to import a project from a repository."""
|
""" Remote repositories, used by this project itself """

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

def repositories():
    """Declare the external archives this workspace fetches over HTTP."""
    # BUILD content exposing every file of an archive that ships without
    # its own BUILD files.
    _all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""

    # Skylib: common Starlark utility functions.
    http_archive(
        name = "bazel_skylib",
        sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
            "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
        ],
    )

    # Ninja 1.10.0 sources, exposed via the catch-all filegroup above.
    http_archive(
        name = "ninja_build",
        build_file_content = _all_content,
        sha256 = "3810318b08489435f8efc19c05525e80a993af5a55baa0dfeae0465a9d45f99f",
        strip_prefix = "ninja-1.10.0",
        urls = [
            "https://github.com/ninja-build/ninja/archive/v1.10.0.tar.gz",
        ],
    )

    # CMake 3.17.2 sources, exposed via the catch-all filegroup above.
    http_archive(
        name = "cmake",
        build_file_content = _all_content,
        sha256 = "fc77324c4f820a09052a7785549b8035ff8d3461ded5bbd80d252ae7d1cd3aa5",
        strip_prefix = "cmake-3.17.2",
        urls = [
            "https://github.com/Kitware/CMake/releases/download/v3.17.2/cmake-3.17.2.tar.gz",
        ],
    )
|
""" Remote repositories, used by this project itself """

load('@bazel_tools//tools/build_defs/repo:http.bzl', 'http_archive')

def repositories():
    """Declare the external archives this workspace fetches over HTTP."""
    # BUILD content exposing every file of archives without BUILD files.
    _all_content = 'filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])'
    # Skylib utilities, Ninja 1.10.0 sources, and CMake 3.17.2 sources.
    http_archive(name='bazel_skylib', sha256='97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44', urls=['https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz', 'https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz'])
    http_archive(name='ninja_build', build_file_content=_all_content, sha256='3810318b08489435f8efc19c05525e80a993af5a55baa0dfeae0465a9d45f99f', strip_prefix='ninja-1.10.0', urls=['https://github.com/ninja-build/ninja/archive/v1.10.0.tar.gz'])
    http_archive(name='cmake', build_file_content=_all_content, sha256='fc77324c4f820a09052a7785549b8035ff8d3461ded5bbd80d252ae7d1cd3aa5', strip_prefix='cmake-3.17.2', urls=['https://github.com/Kitware/CMake/releases/download/v3.17.2/cmake-3.17.2.tar.gz'])
|
"""Patch Server for Jamf Pro"""
# Distribution metadata.
__title__ = "Patch Server"
__version__ = "2020.10.02"
__author__ = "Bryson Tyrrell"
|
"""Patch Server for Jamf Pro"""
# Distribution metadata.
__title__ = 'Patch Server'
__version__ = '2020.10.02'
__author__ = 'Bryson Tyrrell'
|
def goTo(logic, x, y):
    """Walk the hero to (x, y) and say the boolean `logic` result."""
    hero.moveXY(x, y)
    hero.say(logic)

# CodeCombat level script: fetch three secret booleans from the nearest
# friend, then answer a boolean-logic question at each checkpoint.
hero.moveXY(26, 16);
a = hero.findNearestFriend().getSecretA()
b = hero.findNearestFriend().getSecretB()
c = hero.findNearestFriend().getSecretC()
goTo(a and b or c, 25, 26)
goTo((a or b) and c, 26, 32)
goTo((a or c) and (b or c), 35, 32)
goTo((a and b) or (not c and b), 40, 22)
|
def go_to(logic, x, y):
    """Walk the hero to (x, y) and say the boolean `logic` result."""
    hero.moveXY(x, y)
    hero.say(logic)

# CodeCombat level script: fetch three secret booleans from the nearest
# friend, then answer a boolean-logic question at each checkpoint.
hero.moveXY(26, 16)
a = hero.findNearestFriend().getSecretA()
b = hero.findNearestFriend().getSecretB()
c = hero.findNearestFriend().getSecretC()
go_to(a and b or c, 25, 26)
go_to((a or b) and c, 26, 32)
go_to((a or c) and (b or c), 35, 32)
go_to(a and b or (not c and b), 40, 22)
|
def afl(x):
    """
    If no 'l' key is included, add a list of None's the same length as key 'a'.
    """
    if 'l' not in x:
        # Default labels: one empty string per entry of x['a'].
        x['l'] = [''] * len(x['a'])
    return x
class V1:
    """Variable-text specification, versions 'std' and 'Q2P'."""

    def __init__(self, version='std', **kwargs):
        self.version = version

    @property
    def doms(self):
        """Domain set name for each subset variable."""
        return {'uc': 'qD', 'currapp': 'qD', 'potapp': 'qD'}

    @property
    def conds(self):
        """Condition set name for each subset variable."""
        return {'uc': 'KT_subset', 'currapp': 'u_subset', 'potapp': 'c_subset'}

    @property
    def vartext(self):
        """Per-variable afl() specs for the selected version; implicitly
        None when the version is unrecognized."""
        # Bug fix: strings were compared with `is` (identity), which only
        # appears to work because CPython interns short literals; any
        # computed version string would fail.  Use `==`.
        if self.version == 'std':
            return {'PwT': afl({'a': [None]}),
                    'qD': afl({'a': [None, 'a_aa', 'a_aaa']}),
                    'theta_c': afl({'a': [None]}),
                    'theta_p': afl({'a': [None]}),
                    'cbar': afl({'a': [None]}),
                    'n': afl({'a': ['a_aa', 'a_aaa']}),
                    'k2t': afl({'a': [None]}),
                    'u2c': afl({'a': [None]}),
                    'c2e': afl({'a': [None, ['a_aa', 'aa_aaa']]})}
        elif self.version == 'Q2P':
            return {'PwT': afl({'a': ['a_aaa']}),
                    'qD': afl({'a': [None, 'a_aa', 'a_aaa']}),
                    'theta_c': afl({'a': [None]}),
                    'theta_p': afl({'a': [None]}),
                    'cbar': afl({'a': [None]}),
                    'n': afl({'a': ['a_aa', 'a_aaa']}),
                    'k2t': afl({'a': [None]}),
                    'u2c': afl({'a': [None]}),
                    'c2e': afl({'a': [None, ['a_aa', 'aa_aaa']]}),
                    'q2p': afl({'a': ['aa_aaa']})
                    }
|
def afl(x):
    """
    If no 'l' key is included, add a list of None's the same length as key 'a'.
    """
    if 'l' in x:
        return x
    filler = [''] * len(x['a'])
    x['l'] = filler
    return x
class V1:
    """Variable-text specification, versions 'std' and 'Q2P'."""

    def __init__(self, version='std', **kwargs):
        self.version = version

    @property
    def doms(self):
        """Domain set name for each subset variable."""
        return {'uc': 'qD', 'currapp': 'qD', 'potapp': 'qD'}

    @property
    def conds(self):
        """Condition set name for each subset variable."""
        return {'uc': 'KT_subset', 'currapp': 'u_subset', 'potapp': 'c_subset'}

    @property
    def vartext(self):
        """Per-variable afl() specs for the selected version; implicitly
        None when the version is unrecognized."""
        # Bug fix: replaced `is 'std'` / `is 'Q2P'` identity comparisons
        # (which rely on CPython string interning) with `==`.
        if self.version == 'std':
            return {'PwT': afl({'a': [None]}), 'qD': afl({'a': [None, 'a_aa', 'a_aaa']}), 'theta_c': afl({'a': [None]}), 'theta_p': afl({'a': [None]}), 'cbar': afl({'a': [None]}), 'n': afl({'a': ['a_aa', 'a_aaa']}), 'k2t': afl({'a': [None]}), 'u2c': afl({'a': [None]}), 'c2e': afl({'a': [None, ['a_aa', 'aa_aaa']]})}
        elif self.version == 'Q2P':
            return {'PwT': afl({'a': ['a_aaa']}), 'qD': afl({'a': [None, 'a_aa', 'a_aaa']}), 'theta_c': afl({'a': [None]}), 'theta_p': afl({'a': [None]}), 'cbar': afl({'a': [None]}), 'n': afl({'a': ['a_aa', 'a_aaa']}), 'k2t': afl({'a': [None]}), 'u2c': afl({'a': [None]}), 'c2e': afl({'a': [None, ['a_aa', 'aa_aaa']]}), 'q2p': afl({'a': ['aa_aaa']})}
|
# http://code.activestate.com/recipes/119466-dijkstras-algorithm-for-shortest-paths/
"""
G = {'s':{'u':10, 'x':5},
'u':{'v':1, 'x':2},
'v':{'y':4},
'x':{'u':3, 'v':9, 'y':2},
'y':{'s':7, 'v':6}}
"""
def graph_to_dot(G):
    """Render weighted digraph *G* (dict of dicts) as Graphviz DOT text."""
    template = """digraph G {\nnode [width=.3,height=.3,shape=octagon,style=filled,color=skyblue];\noverlap="false";\nrankdir="LR";\n%s}"""
    edges = []
    for src in G:
        for dst, weight in G[src].items():
            edges.append('%s -> %s [label="%s"];\n' % (src, dst, str(weight)))
    return template % ''.join(edges)
# http://graphviz-dev.appspot.com/
# http://ashitani.jp/gv/#
|
"""
G = {'s':{'u':10, 'x':5},
'u':{'v':1, 'x':2},
'v':{'y':4},
'x':{'u':3, 'v':9, 'y':2},
'y':{'s':7, 'v':6}}
"""
def graph_to_dot(G):
    """Render a weighted digraph (dict of dicts) as Graphviz DOT source."""
    header = 'digraph G {\nnode [width=.3,height=.3,shape=octagon,style=filled,color=skyblue];\noverlap="false";\nrankdir="LR";\n%s}'
    body = ''.join('%s -> %s [label="%s"];\n' % (src, dst, str(w))
                   for src in G for dst, w in G[src].items())
    return header % body
|
def print_array(array):
    """Print each element followed by a space, then end the line."""
    for item in array:
        print(item, end=' ')
    print('')
def bubble_sort(array):
    """Sort *array* in place (ascending) using bubble sort with early exit.

    Fix: the original compared with ``>=``, which swapped equal elements
    -- wasted work and an unstable sort.  Strict ``>`` keeps equal
    elements in their original relative order.
    """
    for i in range(len(array)):
        swapped = False
        # After pass i the last i elements are already in place.
        for j in range(0, len(array) - i - 1):
            if array[j] > array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]
                swapped = True
        # No swap in a full pass means the array is sorted.
        if not swapped:
            break
# Demo: show the sample array before and after the in-place bubble sort.
foo_array = [9, 4, 3, 5, 1]
print_array(foo_array)
bubble_sort(foo_array)
print_array(foo_array)
|
def print_array(array):
    """Print the elements space-separated on one line."""
    for element in array:
        print(element, end=' ')
    print('')
def bubble_sort(array):
    """In-place ascending bubble sort with early exit.

    Fix: strict ``>`` instead of ``>=`` -- the original needlessly
    swapped equal elements, making the sort unstable.
    """
    for i in range(len(array)):
        swapped = False
        for j in range(0, len(array) - i - 1):
            if array[j] > array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]
                swapped = True
        if not swapped:
            # Already sorted; skip the remaining passes.
            break
# Demo: print the sample array, sort it in place, print it again.
foo_array = [9, 4, 3, 5, 1]
print_array(foo_array)
bubble_sort(foo_array)
print_array(foo_array)
|
class Solution:
    def findAnagrams(self, s: str, p: str) -> List[int]:
        """Return the start indices of every anagram of p within s.

        Sliding-window character counts: the window always holds the
        last len(p) characters of s; a match is recorded whenever the
        window's counts equal p's counts.
        """
        n, m = len(s), len(p)
        need = collections.defaultdict(int)
        for ch in p:
            need[ch] += 1
        window = collections.defaultdict(int)
        # Pre-fill the first m-1 characters; the loop adds the m-th.
        for ch in s[:m - 1]:
            window[ch] += 1
        starts = []
        for i in range(m - 1, n):
            window[s[i]] += 1
            if i - m >= 0:
                left = s[i - m]
                window[left] -= 1
                # Drop zero counts so dict equality works.
                if window[left] == 0:
                    del window[left]
            if window == need:
                starts.append(i - m + 1)
        return starts
|
class Solution:
    def find_anagrams(self, s: str, p: str) -> List[int]:
        """Sliding-window anagram search: indices where p's anagram starts in s."""
        target = collections.defaultdict(int)
        for ch in p:
            target[ch] += 1
        width = len(p)
        counts = collections.defaultdict(int)
        for ch in s[:width - 1]:
            counts[ch] += 1
        result = []
        for end in range(width - 1, len(s)):
            counts[s[end]] += 1
            out = end - width
            if out >= 0:
                counts[s[out]] -= 1
                if counts[s[out]] == 0:
                    # Remove exhausted keys so the dicts compare equal.
                    del counts[s[out]]
            if counts == target:
                result.append(out + 1)
        return result
|
'''
We have an array A of integers, and an array queries of queries.
For the i-th query val = queries[i][0], index = queries[i][1], we add val to A[index]. Then, the answer to the i-th query is the sum of the even values of A.
(Here, the given index = queries[i][1] is a 0-based index, and each query permanently modifies the array A.)
Return the answer to all queries. Your answer array should have answer[i] as the answer to the i-th query.
Example 1:
Input: A = [1,2,3,4], queries = [[1,0],[-3,1],[-4,0],[2,3]]
Output: [8,6,2,4]
'''
class Solution(object):
    def sumEvenAfterQueries(self, A, queries):
        """
        :type A: List[int]
        :type queries: List[List[int]]
        :rtype: List[int]

        Maintain a running sum of even values; each query adjusts the
        sum incrementally instead of rescanning A.
        """
        even_sum = sum(v for v in A if v % 2 == 0)
        answers = []
        for delta, idx in queries:
            old = A[idx]
            if old % 2 == 0:
                even_sum -= old
            updated = old + delta
            if updated % 2 == 0:
                even_sum += updated
            A[idx] = updated
            answers.append(even_sum)
        return answers
|
"""
We have an array A of integers, and an array queries of queries.
For the i-th query val = queries[i][0], index = queries[i][1], we add val to A[index]. Then, the answer to the i-th query is the sum of the even values of A.
(Here, the given index = queries[i][1] is a 0-based index, and each query permanently modifies the array A.)
Return the answer to all queries. Your answer array should have answer[i] as the answer to the i-th query.
Example 1:
Input: A = [1,2,3,4], queries = [[1,0],[-3,1],[-4,0],[2,3]]
Output: [8,6,2,4]
"""
class Solution(object):
    def sum_even_after_queries(self, A, queries):
        """
        :type A: List[int]
        :type queries: List[List[int]]
        :rtype: List[int]

        Track the sum of even entries incrementally across the queries.
        """
        total = 0
        for v in A:
            if v % 2 == 0:
                total += v
        out = []
        for pair in queries:
            delta, pos = pair[0], pair[1]
            before = A[pos]
            if before % 2 == 0:
                total -= before
            after = before + delta
            if after % 2 == 0:
                total += after
            A[pos] = after
            out.append(total)
        return out
|
'''
Created on Oct 3, 2015
@author: bcy-3
'''
|
"""
Created on Oct 3, 2015
@author: bcy-3
"""
|
# Registration constants for the "pusta2" app.
app_name = "pusta2"
# URL prefix under which the app's routes are mounted.
prefix_url = "pusta2"
# Static assets served for the app, grouped by asset type -> {path: [files]}.
static_files = {
    'js': {
        'pusta2/js/': ['main.js', ]
    },
    'css': {
        'pusta2/css/': ['main.css', ]
    },
    'html': {
        'pusta2/html/': ['index.html', ]
    }
}
# Permission codename -> human-readable description.
# NOTE(review): "actualy" is a typo in user-facing text -- confirm the
# string is not matched elsewhere before correcting it.
permissions = {
    "edit": "Editing actualy nothing.",
    "sample1": "sample1longversion",
}
|
# Registration constants for the "pusta2" app (compact form).
app_name = 'pusta2'
prefix_url = 'pusta2'
# Static assets grouped by asset type -> {path: [files]}.
static_files = {'js': {'pusta2/js/': ['main.js']}, 'css': {'pusta2/css/': ['main.css']}, 'html': {'pusta2/html/': ['index.html']}}
# Permission codename -> description.  NOTE(review): "actualy" is a typo
# in user-facing text -- confirm before fixing.
permissions = {'edit': 'Editing actualy nothing.', 'sample1': 'sample1longversion'}
|
"""This problem was asked by Facebook.
We have some historical clickstream data gathered from our site anonymously using cookies.
The histories contain URLs that users have visited in chronological order.
Write a function that takes two users' browsing histories as input and returns the longest
contiguous sequence of URLs that appear in both.
For example, given the following two users' histories:
user1 = ['/home', '/register', '/login', '/user', '/one', '/two']
user2 = ['/home', '/red', '/login', '/user', '/one', '/pink']
You should return the following:
['/login', '/user', '/one']
"""
|
"""This problem was asked by Facebook.
We have some historical clickstream data gathered from our site anonymously using cookies.
The histories contain URLs that users have visited in chronological order.
Write a function that takes two users' browsing histories as input and returns the longest
contiguous sequence of URLs that appear in both.
For example, given the following two users' histories:
user1 = ['/home', '/register', '/login', '/user', '/one', '/two']
user2 = ['/home', '/red', '/login', '/user', '/one', '/pink']
You should return the following:
['/login', '/user', '/one']
"""
|
# Advent of Code 2020 day 9, part 1: find the first number that is NOT
# the sum of any two of the 25 numbers preceding it.
# NOTE(review): `input` and `sum` shadow Python builtins in this script.
input = open('input.txt', 'r').read().split("\n")
preamble_length = 25
invalid = 0
for i in range(preamble_length, len(input)):
    current = int(input[i])
    found = False
    # Try every distinct pair in the 25-number window before position i.
    for j in range(i - preamble_length, i):
        for k in range (j + 1, i):
            sum = int(input[j]) + int(input[k])
            if sum == current:
                found = True
    if not found:
        invalid = int(input[i])
        print("No match found for " + input[i])
        break
def find_sequence(search):
    """Find a contiguous run of values in the global ``input`` list that
    sums to *search*.

    Grows a window from every start index until the running total reaches
    or exceeds the target.  Returns the list of values in the first
    matching window, or None (implicitly) when no window matches.

    Fix: local names renamed so the builtin ``sum`` is no longer shadowed.
    """
    for start in range(0, len(input)):
        window = []
        total = 0
        for end in range(start, len(input)):
            value = int(input[end])
            total += value
            window.append(value)
            if total == search:
                return window
            if total > search:
                # Overshot; restart from the next window start.
                break
# Part 2: locate the contiguous run summing to the invalid number and
# report the sum of its smallest and largest members.
sequence = find_sequence(invalid)
min_val = min(sequence)
max_val = max(sequence)
print(sequence)
print("min = " + str(min_val))
print("max = " + str(max_val))
print("min + max = " + str(min_val + max_val))
|
# Advent of Code 2020 day 9, part 1: first number that is not the sum of
# two of the preceding 25.  NOTE(review): `input`/`sum` shadow builtins.
input = open('input.txt', 'r').read().split('\n')
preamble_length = 25
invalid = 0
for i in range(preamble_length, len(input)):
    current = int(input[i])
    found = False
    # Check every pair in the sliding 25-number window.
    for j in range(i - preamble_length, i):
        for k in range(j + 1, i):
            sum = int(input[j]) + int(input[k])
            if sum == current:
                found = True
    if not found:
        invalid = int(input[i])
        print('No match found for ' + input[i])
        break
def find_sequence(search):
    """Return the first contiguous run of global ``input`` values summing
    to *search*, or None (implicitly) if no run matches.

    Fix: the original shadowed the builtin ``sum`` with a local; renamed
    to ``total`` (behavior unchanged).
    """
    for start in range(0, len(input)):
        window = []
        total = 0
        for end in range(start, len(input)):
            value = int(input[end])
            total += value
            window.append(value)
            if total == search:
                return window
            if total > search:
                break
# Part 2: report min + max of the contiguous run found above.
sequence = find_sequence(invalid)
min_val = min(sequence)
max_val = max(sequence)
print(sequence)
print('min = ' + str(min_val))
print('max = ' + str(max_val))
print('min + max = ' + str(min_val + max_val))
|
# Read a 2-D point and classify it: origin ("Origem"), on an axis
# ("Eixo X"/"Eixo Y"), or in one of the quadrants Q1-Q4.
x,y = map(float, input().split())
if (x == y == 0):
    print("Origem")
elif (y == 0):
    print("Eixo X")
elif (x == 0):
    print("Eixo Y")
elif (x > 0) and (y > 0):
    print("Q1")
elif (x < 0) and (y > 0):
    print("Q2")
elif (x < 0) and (y < 0):
    print("Q3")
elif (x > 0) and (y < 0):
    print("Q4")
|
# Classify a 2-D point: origin, axis, or quadrant (labels in Portuguese).
(x, y) = map(float, input().split())
if x == y == 0:
    print('Origem')
elif y == 0:
    print('Eixo X')
elif x == 0:
    print('Eixo Y')
elif x > 0 and y > 0:
    print('Q1')
elif x < 0 and y > 0:
    print('Q2')
elif x < 0 and y < 0:
    print('Q3')
elif x > 0 and y < 0:
    print('Q4')
|
"""ROM methods."""
def read_all_rom(self) -> list:
    """Return every ROM location's value.

    Args:
        self: Processor instance holding the registers, accumulator, etc.

    Returns:
        list: the full contents of ROM.
    """
    return self.ROM
def read_all_rom_ports(self) -> list:
    """Return the values of every ROM port.

    Args:
        self: Processor instance holding the registers, accumulator, etc.

    Returns:
        list: the full contents of ROM_PORT.
    """
    return self.ROM_PORT
|
"""ROM methods."""
def read_all_rom(self) -> list:
    """Return the full contents of ROM.

    Args:
        self: Processor instance (registers, accumulator, etc).

    Returns:
        list: values of all ROM locations.
    """
    return self.ROM
def read_all_rom_ports(self) -> list:
    """Return the full contents of ROM_PORT.

    Args:
        self: Processor instance (registers, accumulator, etc).

    Returns:
        list: values of all ROM ports.
    """
    return self.ROM_PORT
|
class Point(object):
    """A mutable 2-D point; coordinates are stored non-public and exposed
    through getter/setter methods."""
    def __init__(self, x, y):
        self._x = x
        self._y = y
    def get_x(self):
        return self._x
    def set_x(self, x):
        self._x = x
    def get_y(self):
        return self._y
    def set_y(self, y):
        self._y = y
def euclidean_distance(a, b):
    """Return the straight-line (Euclidean) distance between two points.

    Accepts any objects exposing get_x()/get_y() accessors.
    math.hypot is used instead of manual squaring: it avoids overflow/
    underflow of the intermediate squares for extreme coordinates.
    """
    import math
    return math.hypot(a.get_x() - b.get_x(), a.get_y() - b.get_y())
|
class Point(object):
    """Mutable 2-D point with accessor methods for its coordinates."""
    def __init__(self, x, y):
        self._x = x
        self._y = y
    def get_x(self):
        return self._x
    def set_x(self, x):
        self._x = x
    def get_y(self):
        return self._y
    def set_y(self, y):
        self._y = y
def euclidean_distance(a, b):
    """Euclidean distance between two get_x()/get_y() point objects.

    Uses math.hypot, which is numerically safer than squaring and
    square-rooting by hand (no intermediate overflow/underflow).
    """
    import math
    return math.hypot(a.get_x() - b.get_x(), a.get_y() - b.get_y())
|
# CS50-style "Mario" pyramid: prompt until a valid height (0-23) is read,
# then print a two-sided pyramid of '#' characters.
print("Height: ", end='')
while True:
    height = input()
    # check if int
    try:
        height = int(height)
    except ValueError:
        print("Retry: ", end='')
        continue
    # check if suitable value
    if height >= 0 and height <= 23:
        break
    else:
        print("Height: ", end='')
# draw pyramid: row i has i+1 hashes, right-aligned, mirrored with a gap
for i in range(height):
    hashes = "#" * (i + 1)
    line = hashes.rjust(height) + ' ' + hashes
    print(line)
|
# CS50-style "Mario" pyramid: prompt until a valid height (0-23) is read,
# then print a two-sided pyramid of '#' characters.
print('Height: ', end='')
while True:
    height = input()
    # Reject non-integer input and re-prompt.
    try:
        height = int(height)
    except ValueError:
        print('Retry: ', end='')
        continue
    # Accept only heights in the allowed range.
    if height >= 0 and height <= 23:
        break
    else:
        print('Height: ', end='')
# Row i: i+1 hashes right-aligned, a gap, then the mirror image.
for i in range(height):
    hashes = '#' * (i + 1)
    line = hashes.rjust(height) + ' ' + hashes
    print(line)
|
# In Search for the Lost Memory [Explorer Pirate + Jett] (3527)
# NOTE(review): `sm` is presumably the host engine's script manager and
# `parentID` the quest id injected by the runtime -- confirm against the
# scripting API.
recoveredMemory = 7081
kyrin = 1090000  # NPC id used as the dialogue speaker
sm.setSpeakerID(kyrin)
sm.sendNext("A stable position, with a calm demanor-- but I can tell you're hiding your explosive attacking abilities-- "
            "you've become quite an impressive pirate, #h #. It's been a while.")
sm.sendSay("You used to be a kid that was scared of water-- and look at you now. "
           "I knew you'd grow to a formidable pirate, but like this? I am thrilled to see you all grown up like this.")
sm.sendSay("What I can tell you is-- keep going. "
           "As the person responsible for making you a pirate, I have no doubt in my mind that you still have room to grow-- "
           "and that you will become an even more powerful force.")
sm.startQuest(parentID)
sm.completeQuest(parentID)
sm.startQuest(recoveredMemory)
sm.setQRValue(recoveredMemory, "1", False)
|
# In Search for the Lost Memory [Explorer Pirate + Jett] (3527)
# Bug fix: a half-finished snake_case rename left the last two calls
# referencing the old name `recoveredMemory`, which raised NameError.
recovered_memory = 7081
kyrin = 1090000  # NPC id used as the dialogue speaker
sm.setSpeakerID(kyrin)
sm.sendNext("A stable position, with a calm demanor-- but I can tell you're hiding your explosive attacking abilities-- you've become quite an impressive pirate, #h #. It's been a while.")
sm.sendSay("You used to be a kid that was scared of water-- and look at you now. I knew you'd grow to a formidable pirate, but like this? I am thrilled to see you all grown up like this.")
sm.sendSay('What I can tell you is-- keep going. As the person responsible for making you a pirate, I have no doubt in my mind that you still have room to grow-- and that you will become an even more powerful force.')
sm.startQuest(parentID)
sm.completeQuest(parentID)
sm.startQuest(recovered_memory)
sm.setQRValue(recovered_memory, '1', False)
|
class Powerup:
    """Abstract map pickup; subclasses implement the actual effect."""
    def __init__(self, coord):
        self.coord = coord
    def use(self, player):
        # Concrete power-ups must override this.
        raise NotImplementedError
    def ascii(self):
        """Single-character map representation."""
        return "P"
|
class Powerup:
    """Base class for map pickups; the effect is defined by subclasses."""
    def __init__(self, coord):
        self.coord = coord
    def use(self, player):
        """Apply the power-up to *player*; must be overridden."""
        raise NotImplementedError
    def ascii(self):
        """One-character symbol used when rendering the map."""
        return 'P'
|
# File-extension -> language identifier map.
languages = {
    "c": "c",
    "cpp": "cpp",
    "cc": "cpp",
    "cs": "csharp",
    "java": "java",
    "py": "python",
    "rb": "ruby"
}
|
# File-extension -> language identifier map (compact form).
languages = {'c': 'c', 'cpp': 'cpp', 'cc': 'cpp', 'cs': 'csharp', 'java': 'java', 'py': 'python', 'rb': 'ruby'}
|
# Create database engine for data.db
engine = create_engine('sqlite:///data.db')
# Write query to get date, tmax, and tmin from weather
query = """
SELECT date,
tmax,
tmin
FROM weather;
"""
# Make a data frame by passing query and engine to read_sql()
temperatures = pd.read_sql(query, engine)
# View the resulting data frame
print(temperatures)
'''
script.py> output:
date tmax tmin
0 12/01/2017 52 42
1 12/02/2017 48 39
2 12/03/2017 48 42
3 12/04/2017 51 40
...
119 03/30/2018 62 44
120 03/31/2018 58 39
[121 rows x 3 columns]
Selecting columns is useful when you only want a few columns from a table.
If you want most of the columns, it may be easier to load them all and then use pandas to drop unwanted columns.
'''
|
engine = create_engine('sqlite:///data.db')
query = '\nSELECT date, \n tmax, \n tmin\n FROM weather;\n'
temperatures = pd.read_sql(query, engine)
print(temperatures)
'\nscript.py> output:\n date tmax tmin\n 0 12/01/2017 52 42\n 1 12/02/2017 48 39\n 2 12/03/2017 48 42\n 3 12/04/2017 51 40\n ...\n 119 03/30/2018 62 44\n 120 03/31/2018 58 39\n \n [121 rows x 3 columns]\n\n\nSelecting columns is useful when you only want a few columns from a table. \nIf you want most of the columns, it may be easier to load them all and then use pandas to drop unwanted columns.\n\n'
|
# Game parameter definitions (board, players, money, chance cards).
quantidade_jogadores = 2  # number of players
jogadores = []  # array holding the players (in turn order)
tamanho_tabuleiro = 40  # board array size (always a multiple of 4 so the board stays square)
quantidade_dados = 2  # how many dice are used
quantidade_reves = int(tamanho_tabuleiro/5)  # how many Chance/Setback squares exist on the board
dinheiro_inicial = 10000000  # each player's starting money
jogadas_default = 1  # moves per player per turn (may change when the dice match)
# "Go to jail" and "jail" must sit in opposite corners.  The board is split
# in four: "go to jail" goes at the first corner, "jail" at the third.
pos_vai_para_cadeia = int(tamanho_tabuleiro/4)  # position of the "go to jail" square
pos_Cadeia = int(pos_vai_para_cadeia * 3)  # position of the "jail" square
# Building level codes -> labels (nothing / house / hotel).
contrucoes={
    '1': 'Nada',
    '2': 'Casa',
    '3': 'Hotel'
}
# Possible "luck" cards: {description: prize amount}.
possiveis_sorte = [
    {"Ganhou na loteria!": "500"},
    {"Foi promovido no emprego!": "1500"}
]
# Possible "setback" cards: {description: penalty amount}.
possiveis_reves = [
    {"Perdeu o mindinho da mao esquerda": "500"},
    {"Seu filho pegou Piolho": "50"},
    {"Policia Apreendeu seus 15 hectares de maconha, por pouco nao foi preso!": "3500"}
]
|
# Game parameter definitions (board, players, money, chance cards).
quantidade_jogadores = 2  # number of players
jogadores = []  # players, in turn order
tamanho_tabuleiro = 40  # board size; multiple of 4 so the board is square
quantidade_dados = 2  # number of dice
quantidade_reves = int(tamanho_tabuleiro / 5)  # Chance/Setback square count
dinheiro_inicial = 10000000  # starting money per player
jogadas_default = 1  # default moves per turn
# "Go to jail" at the first board corner, "jail" at the third.
pos_vai_para_cadeia = int(tamanho_tabuleiro / 4)
pos__cadeia = int(pos_vai_para_cadeia * 3)
# Building level codes -> labels; luck/setback cards as {text: amount}.
contrucoes = {'1': 'Nada', '2': 'Casa', '3': 'Hotel'}
possiveis_sorte = [{'Ganhou na loteria!': '500'}, {'Foi promovido no emprego!': '1500'}]
possiveis_reves = [{'Perdeu o mindinho da mao esquerda': '500'}, {'Seu filho pegou Piolho': '50'}, {'Policia Apreendeu seus 15 hectares de maconha, por pouco nao foi preso!': '3500'}]
|
s = 'azcbobobegghakl'
# Count (possibly overlapping) occurrences of 'bob' by sliding a
# 3-character window across s.
num = 0
for start in range(0, len(s) - 2):
    if s[start:start + 3] == 'bob':
        num += 1
print('Number of times bob occurs is: ' + str(num))
|
s = 'azcbobobegghakl'
# Overlapping scan: check every 3-character slice of s.
num = sum(1 for i in range(len(s) - 2) if s[i:i + 3] == 'bob')
print('Number of times bob occurs is: ' + str(num))
|
# --- Day 14: Docking Data ---
# As your ferry approaches the sea port, the captain asks for your help again. The computer system that runs this port isn't compatible with the docking program on the ferry, so the docking parameters aren't being correctly initialized in the docking program's memory.
# After a brief inspection, you discover that the sea port's computer system uses a strange bitmask system in its initialization program. Although you don't have the correct decoder chip handy, you can emulate it in software!
# The initialization program (your puzzle input) can either update the bitmask or write a value to memory. Values and memory addresses are both 36-bit unsigned integers. For example, ignoring bitmasks for a moment, a line like mem[8] = 11 would write the value 11 to memory address 8.
# The bitmask is always given as a string of 36 bits, written with the most significant bit (representing 2^35) on the left and the least significant bit (2^0, that is, the 1s bit) on the right. The current bitmask is applied to values immediately before they are written to memory: a 0 or 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value unchanged.
# For example, consider the following program:
# mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
# mem[8] = 11
# mem[7] = 101
# mem[8] = 0
# This program starts by specifying a bitmask (mask = ....). The mask it specifies will overwrite two bits in every written value: the 2s bit is overwritten with 0, and the 64s bit is overwritten with 1.
# The program then attempts to write the value 11 to memory address 8. By expanding everything out to individual bits, the mask is applied as follows:
# value: 000000000000000000000000000000001011 (decimal 11)
# mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
# result: 000000000000000000000000000001001001 (decimal 73)
# So, because of the mask, the value 73 is written to memory address 8 instead. Then, the program tries to write 101 to address 7:
# value: 000000000000000000000000000001100101 (decimal 101)
# mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
# result: 000000000000000000000000000001100101 (decimal 101)
# This time, the mask has no effect, as the bits it overwrote were already the values the mask tried to set. Finally, the program tries to write 0 to address 8:
# value: 000000000000000000000000000000000000 (decimal 0)
# mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
# result: 000000000000000000000000000001000000 (decimal 64)
# 64 is written to address 8 instead, overwriting the value that was there previously.
# To initialize your ferry's docking program, you need the sum of all values left in memory after the initialization program completes. (The entire 36-bit address space begins initialized to the value 0 at every address.) In the above example, only two values in memory are not zero - 101 (at address 7) and 64 (at address 8) - producing a sum of 165.
# Execute the initialization program. What is the sum of all values left in memory after it completes?
def fileInput():
    """Read the puzzle input file and return its lines as a list.

    Fix: the original opened the file twice -- the first handle was
    immediately shadowed by the ``with`` target and leaked, and the
    trailing close() was redundant.  One context manager suffices.
    """
    with open(inputFile) as f:
        return f.read().split('\n')
def splitData(data):
    """Parse raw program lines into [key, value] pairs.

    'mask' lines keep the mask string; 'mem[N] = V' lines become
    [N, 36-bit binary string of V].  Also initialises the global ``mem``
    list to zeros, sized to the highest address seen.
    """
    dataLine = []
    maxSize = 0
    global mem
    for line in data:
        newLine = line.split(' = ')
        if newLine[0] != 'mask':
            # Extract the integer address N out of 'mem[N]'.
            newLine[0] = int(newLine[0].lstrip("mem[").rstrip("]"))
            maxSize = max(maxSize,newLine[0]+1)
            # Store the value as a zero-padded 36-bit binary string.
            newLine[1] = f'{int(newLine[1]):036b}'
        dataLine.append(newLine)
    mem = [0 for x in range(maxSize)]
    return dataLine
def processData(data):
    """Run the parsed program: track the current mask and apply it to
    every memory write.  Mutates the globals ``mask``, ``mem`` and
    ``maskCount``.
    """
    global mask
    global mem
    global maskCount
    for line in data:
        if line[0] == 'mask':
            mask = line[1]
            # Remember how many floating ('X') bits the new mask has.
            maskCount = mask.count('X')
        else:
            # Mask the binary value, then store it as an integer.
            line[1] = updateBits(mask,line[1])
            mem[line[0]] = int(line[1],2)
def updateBits(mask,bits):
    """Apply a version-1 bitmask to a bit string.

    For each position, a mask character of '0' or '1' overwrites the bit
    and 'X' passes it through.  Both arguments are equal-length bit
    strings; the masked string is returned.

    Generalized: the original hard-coded range(36); zipping the two
    strings handles any equal length and avoids the list surgery.
    """
    return ''.join(b if m == 'X' else m for m, b in zip(mask, bits))
#///////////////////////////////////////////////////
# Entry point: read the program, parse it, run it, sum all memory values.
inputFile = 'day14-input.txt'
mask = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
maskCount = 0
mem = []
if __name__ == "__main__":
    data = fileInput()
    data = splitData(data)
    processData(data)
    print(sum(mem))
|
def file_input():
    """Read the puzzle input file and return its lines as a list.

    Fixes two defects: the first ``open`` leaked a handle (it was
    immediately shadowed by the ``with`` target), and a half-finished
    rename read the undefined global ``inputFile`` instead of the
    module's ``input_file``.
    """
    with open(input_file) as f:
        return f.read().split('\n')
def split_data(data):
    """Parse raw program lines into [key, value] pairs and size ``mem``.

    'mask' lines keep the mask string; 'mem[N] = V' lines become
    [N, 36-bit binary string of V].  The global ``mem`` is reset to
    zeros, sized to the highest address seen.

    Bug fix: a half-finished snake_case rename left ``newLine``,
    ``maxSize`` and ``dataLine`` undefined (NameError at runtime); all
    locals now use one consistent naming.
    """
    data_line = []
    max_size = 0
    global mem
    for line in data:
        new_line = line.split(' = ')
        if new_line[0] != 'mask':
            # Extract the integer address N out of 'mem[N]'.
            new_line[0] = int(new_line[0].lstrip('mem[').rstrip(']'))
            max_size = max(max_size, new_line[0] + 1)
            # Store the value as a zero-padded 36-bit binary string.
            new_line[1] = f'{int(new_line[1]):036b}'
        data_line.append(new_line)
    mem = [0 for _ in range(max_size)]
    return data_line
def process_data(data):
    """Run the parsed program: track the current mask, apply it to writes.

    Mutates the globals ``mask``, ``mem`` and ``mask_count``.

    Bug fix: the original declared ``global maskCount`` (a name that does
    not exist at module level) while assigning a *local* ``mask_count``,
    so the module global ``mask_count`` was never updated.
    """
    global mask
    global mem
    global mask_count
    for line in data:
        if line[0] == 'mask':
            mask = line[1]
            # Remember how many floating ('X') bits the new mask has.
            mask_count = mask.count('X')
        else:
            line[1] = update_bits(mask, line[1])
            mem[line[0]] = int(line[1], 2)
def update_bits(mask, bits):
    """Apply a version-1 bitmask: '0'/'1' overwrite the bit, 'X' keeps it.

    Both arguments are equal-length bit strings; returns the masked
    string.  Generalized from the original's hard-coded range(36) to any
    equal length via zip (identical output for 36-bit inputs).
    """
    return ''.join(b if m == 'X' else m for m, b in zip(mask, bits))
# Entry point: read the program, parse it, run it, sum all memory values.
input_file = 'day14-input.txt'
mask = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
mask_count = 0
mem = []
if __name__ == '__main__':
    data = file_input()
    data = split_data(data)
    process_data(data)
    print(sum(mem))
|
#!/usr/bin/env python3
class DNSMasq_DHCP_Generic_Switchable:
    """A single dnsmasq config switch: rendered as 'name' alone when the
    value is None, otherwise as 'name=value'."""
    def __init__(self, name, value):
        self.name = name
        self.value = value
    def __str__(self):
        if self.value is None:
            return self.name
        return self.name + "=" + self.value
class DNSMasq_DHCP_Option:
    """One dhcp-option line, optionally scoped to a tag.

    Fix: the original defined two __init__ methods; Python keeps only the
    last one, so the (option, value) form was dead code and its
    ``scope = None`` assigned a throwaway local, never ``self.scope``.
    Only the effective three-argument form is preserved (pass None for an
    unscoped option).
    """
    def __init__(self, scope, option, value):
        self.scope = scope
        self.option = option
        self.value = value
    def get_scope(self):
        return self.scope
    def get_option(self):
        return self.option
    def get_value(self):
        return self.value
    def get_comment(self):
        """Map well-known DHCP option numbers to a trailing comment."""
        if self.get_option() == "3":
            return "# Default Gateway"
        elif self.get_option() == "6":
            return "# Default DNS"
        elif self.get_option() == "42":
            return "# Default NTP"
        else:
            return ""
    def __add__(self, o):
        return self.get_str() + o
    def __str__(self):
        return self.get_str()
    def get_str(self):
        """Render as 'dhcp-option=[scope,]option,value <comment>'."""
        res = []
        if self.get_scope() is not None:
            res.append(str(self.get_scope()))
        res.append(str(self.get_option()))
        res.append(str(self.get_value()))
        return "dhcp-option=" + \
               ",".join(res) + \
               " " + \
               str(self.get_comment())
class DNSMasq_DHCP_Range:
    """One dhcp-range line: optional scope tag, address bounds, netmask
    and lease time.

    Fix: the original declared two __init__ methods; only the last one
    took effect, so the four-argument form was dead code (its
    ``scope = None`` set a local, not ``self.scope``).  The effective
    five-argument form is kept (pass None for an unscoped range).
    """
    def __init__(self, scope, range_min, range_max, netmask, lease_time):
        self.scope = scope
        self.range_min = range_min
        self.range_max = range_max
        self.netmask = netmask
        self.lease_time = lease_time
    def get_scope(self):
        return self.scope
    def get_range_min(self):
        return self.range_min
    def get_range_max(self):
        return self.range_max
    def get_netmask(self):
        return self.netmask
    def get_lease_time(self):
        return self.lease_time
    def __add__(self, o):
        return self.get_str() + o
    def __str__(self):
        return self.get_str()
    def get_str(self):
        """Render as 'dhcp-range=[scope,]min,max,netmask,lease'."""
        res = []
        if self.get_scope() is not None:
            res.append(str(self.get_scope()))
        res.append(str(self.get_range_min()))
        res.append(str(self.get_range_max()))
        res.append(str(self.get_netmask()))
        res.append(str(self.get_lease_time()))
        return "dhcp-range=" + \
               ",".join(res)
class DNSMasq_DHCP_Host:
    """One static dhcp-host reservation: optional scope tag, MAC,
    hostname, IP and lease time.

    Fix: the original declared two __init__ methods; Python keeps only
    the last, so the four-argument form was dead code (its
    ``scope = None`` was a no-op local).  The effective five-argument
    form is kept (pass None for an unscoped host).
    """
    def __init__(self, scope, mac_address, hostname, ip_address, lease_time):
        self.scope = scope
        self.mac_address = mac_address
        self.hostname = hostname
        self.ip_address = ip_address
        self.lease_time = lease_time
    def get_scope(self):
        return self.scope
    def get_mac_address(self):
        return self.mac_address
    def get_hostname(self):
        return self.hostname
    def get_ip_address(self):
        return self.ip_address
    def get_lease_time(self):
        return self.lease_time
    def __add__(self, o):
        return self.get_str() + o
    def __str__(self):
        return self.get_str()
    def get_str(self):
        """Render as 'dhcp-host=[scope,]mac,hostname,ip,lease'."""
        res = []
        if self.get_scope() is not None:
            res.append(str(self.get_scope()))
        res.append(str(self.get_mac_address()))
        res.append(str(self.get_hostname()))
        res.append(str(self.get_ip_address()))
        res.append(str(self.get_lease_time()))
        return "dhcp-host=" + \
               ",".join(res)
class DNSMasq_DHCP_Section:
    """One per-network DHCP section: site/role/vlan/vrf/prefix metadata
    plus its dhcp-option, dhcp-range and dhcp-host entries."""
    def __init__(self):
        self.site = None
        self.role = None
        self.vlan_id = None
        self.vlan_name = None
        self.vrf_name = None
        self.prefix = None
        self.dhcp_options = []
        self.dhcp_ranges = []
        self.dhcp_hosts = []
    def set_site(self, site):
        self.site = site
    def set_role(self, role):
        self.role = role
    def set_vlan_id(self, vlan_id):
        self.vlan_id = vlan_id
    def set_vlan_name(self, vlan_name):
        self.vlan_name = vlan_name
    def set_vrf_name(self, vrf_name):
        self.vrf_name = vrf_name
    def set_prefix(self, prefix):
        self.prefix = prefix
    def append_dhcp_option(self, dhcp_option):
        self.dhcp_options.append(dhcp_option)
    def append_dhcp_range(self, dhcp_range):
        self.dhcp_ranges.append(dhcp_range)
    def append_dhcp_host(self, dhcp_host):
        self.dhcp_hosts.append(dhcp_host)
    def get_header(self):
        """Build the '###'-prefixed comment header; only set fields appear."""
        # Example output:
        ### Site: Home
        ### Role: Untagged
        ### Vlan: 66 (Home VLAN) with ID: 66
        ### VRF: vrf_66_homelan
        ### Prefix: 192.168.1.0/24
        res = []
        if self.site is not None:
            res.append("### Site: " + self.site)
        if self.role is not None:
            res.append("### Role: " + self.role)
        # Prefer the combined name+id line when both are known.
        if self.vlan_id is not None and self.vlan_name is not None:
            res.append("### Vlan: " + self.vlan_name + " with ID: " + str(self.vlan_id))
        elif self.vlan_id is not None:
            res.append("### Vlan ID: " + str(self.vlan_id))
        elif self.vlan_name is not None:
            res.append("### Vlan: " + self.vlan_name)
        if self.vrf_name is not None:
            res.append("### VRF: " + self.vrf_name)
        if self.prefix is not None:
            res.append("### Prefix: " + self.prefix)
        return "\n".join(res)
    def get_options(self):
        return self.dhcp_options
    def get_ranges(self):
        return self.dhcp_ranges
    def get_hosts(self):
        return self.dhcp_hosts
class DNSMasq_DHCP_Config:
    """Top-level dnsmasq DHCP configuration: generic switches followed by
    per-network sections, rendered to config text via str()."""
    def __init__(self):
        self.dhcp_config_generic_switches = []
        self.dhcp_config_sections = []
    def append_to_dhcp_config_generic_switches(self, obj):
        self.dhcp_config_generic_switches.append(obj)
    def append_to_dhcp_config_sections(self, obj):
        self.dhcp_config_sections.append(obj)
    def print(self):
        # Convenience wrapper around __str__.
        print(self)
    def __str__(self):
        res = []
        for sw in self.dhcp_config_generic_switches:
            res.append(str(sw))
        for sec in self.dhcp_config_sections:
            # Two blank lines before each section header, then options,
            # ranges and hosts, each group separated by a blank line.
            res.append(str(""))
            res.append(str(""))
            res.append(str(sec.get_header()))
            res.append(str(""))
            for opts in sec.get_options():
                res.append(str(opts))
            res.append(str(""))
            for ran in sec.get_ranges():
                res.append(str(ran))
            res.append(str(""))
            for host in sec.get_hosts():
                res.append(str(host))
        return "\n".join(res)
|
class Dnsmasq_Dhcp_Generic_Switchable:
    """A single dnsmasq switch, rendered as 'name' or 'name=value'."""
    def __init__(self, name, value):
        self.name = name
        self.value = value
    def __str__(self):
        # A bare switch has no value; otherwise render key=value.
        if self.value is None:
            return self.name
        return self.name + '=' + self.value
class Dnsmasq_Dhcp_Option:
    """One dhcp-option line, optionally scoped to a tag.

    Fix: the original defined two __init__ methods; Python keeps only the
    last one, so the (option, value) form was dead code and its
    ``scope = None`` set a local, never ``self.scope``.  Only the
    effective three-argument form is preserved (pass None to omit scope).
    """
    def __init__(self, scope, option, value):
        self.scope = scope
        self.option = option
        self.value = value
    def get_scope(self):
        return self.scope
    def get_option(self):
        return self.option
    def get_value(self):
        return self.value
    def get_comment(self):
        """Map well-known DHCP option numbers to a trailing comment."""
        if self.get_option() == '3':
            return '# Default Gateway'
        elif self.get_option() == '6':
            return '# Default DNS'
        elif self.get_option() == '42':
            return '# Default NTP'
        else:
            return ''
    def __add__(self, o):
        return self.get_str() + o
    def __str__(self):
        return self.get_str()
    def get_str(self):
        """Render as 'dhcp-option=[scope,]option,value <comment>'."""
        res = []
        if self.get_scope() is not None:
            res.append(str(self.get_scope()))
        res.append(str(self.get_option()))
        res.append(str(self.get_value()))
        return 'dhcp-option=' + ','.join(res) + ' ' + str(self.get_comment())
class Dnsmasq_Dhcp_Range:
    """One dhcp-range line: optional scope tag, address bounds, netmask
    and lease time.

    Fix: duplicate __init__ removed -- only the last definition ever took
    effect, so the four-argument form was dead code (its ``scope = None``
    was a no-op local).  Pass None as scope for an unscoped range.
    """
    def __init__(self, scope, range_min, range_max, netmask, lease_time):
        self.scope = scope
        self.range_min = range_min
        self.range_max = range_max
        self.netmask = netmask
        self.lease_time = lease_time
    def get_scope(self):
        return self.scope
    def get_range_min(self):
        return self.range_min
    def get_range_max(self):
        return self.range_max
    def get_netmask(self):
        return self.netmask
    def get_lease_time(self):
        return self.lease_time
    def __add__(self, o):
        return self.get_str() + o
    def __str__(self):
        return self.get_str()
    def get_str(self):
        """Render as 'dhcp-range=[scope,]min,max,netmask,lease'."""
        res = []
        if self.get_scope() is not None:
            res.append(str(self.get_scope()))
        res.append(str(self.get_range_min()))
        res.append(str(self.get_range_max()))
        res.append(str(self.get_netmask()))
        res.append(str(self.get_lease_time()))
        return 'dhcp-range=' + ','.join(res)
class Dnsmasq_Dhcp_Host:
    """One static dhcp-host reservation: optional scope tag, MAC,
    hostname, IP and lease time.

    Fix: duplicate __init__ removed -- only the last definition took
    effect, so the four-argument form was dead code (its ``scope = None``
    was a no-op local).  Pass None as scope for an unscoped host.
    """
    def __init__(self, scope, mac_address, hostname, ip_address, lease_time):
        self.scope = scope
        self.mac_address = mac_address
        self.hostname = hostname
        self.ip_address = ip_address
        self.lease_time = lease_time
    def get_scope(self):
        return self.scope
    def get_mac_address(self):
        return self.mac_address
    def get_hostname(self):
        return self.hostname
    def get_ip_address(self):
        return self.ip_address
    def get_lease_time(self):
        return self.lease_time
    def __add__(self, o):
        return self.get_str() + o
    def __str__(self):
        return self.get_str()
    def get_str(self):
        """Render as 'dhcp-host=[scope,]mac,hostname,ip,lease'."""
        res = []
        if self.get_scope() is not None:
            res.append(str(self.get_scope()))
        res.append(str(self.get_mac_address()))
        res.append(str(self.get_hostname()))
        res.append(str(self.get_ip_address()))
        res.append(str(self.get_lease_time()))
        return 'dhcp-host=' + ','.join(res)
class Dnsmasq_Dhcp_Section:
    """One per-network DHCP section: site/role/vlan/vrf/prefix metadata
    plus its dhcp-option, dhcp-range and dhcp-host entries."""
    def __init__(self):
        self.site = None
        self.role = None
        self.vlan_id = None
        self.vlan_name = None
        self.vrf_name = None
        self.prefix = None
        self.dhcp_options = []
        self.dhcp_ranges = []
        self.dhcp_hosts = []
    def set_site(self, site):
        self.site = site
    def set_role(self, role):
        self.role = role
    def set_vlan_id(self, vlan_id):
        self.vlan_id = vlan_id
    def set_vlan_name(self, vlan_name):
        self.vlan_name = vlan_name
    def set_vrf_name(self, vrf_name):
        self.vrf_name = vrf_name
    def set_prefix(self, prefix):
        self.prefix = prefix
    def append_dhcp_option(self, dhcp_option):
        self.dhcp_options.append(dhcp_option)
    def append_dhcp_range(self, dhcp_range):
        self.dhcp_ranges.append(dhcp_range)
    def append_dhcp_host(self, dhcp_host):
        self.dhcp_hosts.append(dhcp_host)
    def get_header(self):
        """Build the '###'-prefixed comment header; only set fields appear."""
        res = []
        if self.site is not None:
            res.append('### Site: ' + self.site)
        if self.role is not None:
            res.append('### Role: ' + self.role)
        # Prefer the combined name+id line when both are known.
        if self.vlan_id is not None and self.vlan_name is not None:
            res.append('### Vlan: ' + self.vlan_name + ' with ID: ' + str(self.vlan_id))
        elif self.vlan_id is not None:
            res.append('### Vlan ID: ' + str(self.vlan_id))
        elif self.vlan_name is not None:
            res.append('### Vlan: ' + self.vlan_name)
        if self.vrf_name is not None:
            res.append('### VRF: ' + self.vrf_name)
        if self.prefix is not None:
            res.append('### Prefix: ' + self.prefix)
        return '\n'.join(res)
    def get_options(self):
        return self.dhcp_options
    def get_ranges(self):
        return self.dhcp_ranges
    def get_hosts(self):
        return self.dhcp_hosts
class Dnsmasq_Dhcp_Config:
    """Whole dnsmasq DHCP configuration: global switches plus sections."""

    def __init__(self):
        self.dhcp_config_generic_switches = []
        self.dhcp_config_sections = []

    def append_to_dhcp_config_generic_switches(self, obj):
        self.dhcp_config_generic_switches.append(obj)

    def append_to_dhcp_config_sections(self, obj):
        self.dhcp_config_sections.append(obj)

    def print(self):
        # Convenience wrapper: write the rendered config to stdout.
        print(self)

    def __str__(self):
        """Render the full config: switches first, then each section
        (blank-line separated header, options, ranges, hosts)."""
        lines = [str(switch) for switch in self.dhcp_config_generic_switches]
        for section in self.dhcp_config_sections:
            lines.extend(['', '', str(section.get_header()), ''])
            lines.extend(str(option) for option in section.get_options())
            lines.append('')
            lines.extend(str(rng) for rng in section.get_ranges())
            lines.append('')
            lines.extend(str(host) for host in section.get_hosts())
        return '\n'.join(lines)
|
# NOTE(review): `pandas` must be imported (as `pandas`) earlier in the file;
# the import is not visible in this chunk -- confirm.
# Comma-separated variant (read_csv's default separator).
df4 = pandas.read_csv('supermarkets-commas.txt')
df4  # bare expression: displays the frame in a notebook/REPL; no effect in a script
# Semicolon-delimited variant, so an explicit sep is required.
df5 = pandas.read_csv('supermarkets-semi-colons.txt',sep=';')
df5
|
# NOTE(review): `pandas` must be imported (as `pandas`) earlier in the file;
# the import is not visible in this chunk -- confirm.
# Comma-separated variant (read_csv's default separator).
df4 = pandas.read_csv('supermarkets-commas.txt')
df4  # bare expression: displays the frame in a notebook/REPL; no effect in a script
# Semicolon-delimited variant, so an explicit sep is required.
df5 = pandas.read_csv('supermarkets-semi-colons.txt', sep=';')
df5
|
class DummyScheduler(object):
    """No-op scheduler placeholder.

    Exposes the same construct-with-optimizer / step() shape as a real
    scheduler (presumably a learning-rate scheduler -- confirm against
    callers) so client code can call step() unconditionally.
    """
    def __init__(self, optimizer):
        # `optimizer` is accepted only for interface compatibility; unused.
        pass
    def step(self):
        # Intentionally a no-op.
        pass
|
class Dummyscheduler(object):
    """No-op scheduler placeholder.

    NOTE(review): class-name casing differs from the conventional
    DummyScheduler spelling used by the parallel snippet in this file --
    confirm which name callers expect.
    """
    def __init__(self, optimizer):
        # `optimizer` is accepted only for interface compatibility; unused.
        pass
    def step(self):
        # Intentionally a no-op.
        pass
|
# -*- coding: utf-8 -*-
"""
Created on Wed May  8 12:07:42 2019

@author: DiPu
"""
# Print an ascending (1..5 stars) then descending (4..1 stars) triangle.
for width in range(1, 6):
    print('*' * width)
for width in range(4, 0, -1):
    print('*' * width)
|
"""
Created on Wed May 8 12:07:42 2019
@author: DiPu
"""
for i in range(1, 6):
print('*' * i)
for j in range(4, 0, -1):
print('*' * j)
|
# Read whitespace-separated integers from stdin and print the third largest
# *distinct* sum obtainable by choosing any three of them.
from itertools import combinations

a = [int(x) for x in input().split()]
# Generalization: the original hard-coded a triple loop over exactly the
# first five numbers (IndexError for fewer); combinations() handles any
# input of >= 3 numbers and is identical for the original 5-number case.
aset = {sum(trio) for trio in combinations(a, 3)}
print(sorted(aset, reverse=True)[2])
|
# Read five integers from stdin and print the third largest *distinct* sum
# obtainable by choosing any three of them.
a = [int(x) for x in input().split()]
aset = set()  # a set keeps only the distinct sums
# Enumerate all index triples i < j < k over the five inputs.
for i in range(5):
    for j in range(i + 1, 5):
        for k in range(j + 1, 5):
            aset.add(a[i] + a[j] + a[k])
print(sorted(aset, reverse=True)[2])
|
""" Store a person's name, and include some whitespace
characters at beginning and end of the name. Make sure you
use each character combination "\t" and "\n" at least one. """
name = ' James '
print(name.lstrip())
print(name.rstrip())
print(name.strip())
print('\tJames Noria')
print('Name:\nJames Noria')
|
""" Store a person's name, and include some whitespace
characters at beginning and end of the name. Make sure you
use each character combination " " and "
" at least one. """
name = ' James '
print(name.lstrip())
print(name.rstrip())
print(name.strip())
print('\tJames Noria')
print('Name:\nJames Noria')
|
def drive(start, end, step, parameters):
    """Step the SIR model over [start, end] in increments of `step`.

    `parameters` supplies the initial state and rates under the
    'P:sir.in.*' keys; the return value maps 'P:sir.out.S/I/R' to the
    per-step series and 'P:sir.in.dt' to the timestep indices.
    """
    results = {
        "P:sir.out.S": [],
        "P:sir.out.I": [],
        "P:sir.out.R": [],
        "P:sir.in.dt": [],
    }
    s = parameters["P:sir.in.S"]
    i = parameters["P:sir.in.I"]
    r = parameters["P:sir.in.R"]
    for t in range(start, end + 1, step):
        s, i, r = sir(s, i, r, parameters["P:sir.in.beta"], parameters["P:sir.in.gamma"], step)
        results["P:sir.out.S"].append(s)
        results["P:sir.out.I"].append(i)
        results["P:sir.out.R"].append(r)
        results["P:sir.in.dt"].append(t)
    return results
"""
Derived from the following:
********************************************************************************
! Input Variables:
! S Amount of susceptible members at the current timestep
! I Amount of infected members at the current timestep
! R Amount of recovered members at the current timestep
! beta Rate of transmission via contact
! gamma Rate of recovery from infection
! dt Next inter-event time
!
! State Variables:
! infected Increase in infected at the current timestep
! recovered Increase in recovered at the current timestep
********************************************************************************
subroutine sir(S, I, R, beta, gamma, dt)
implicit none
double precision S, I, R, beta, gamma, dt
double precision infected, recovered
infected = ((beta*S*I) / (S + I + R)) * dt
recovered = (gamma*I) * dt
S = S - infected
I = I + infected - recovered
R = R + recovered
end subroutine sir
"""
def sir(S: float, I: float, R: float, beta: float, gamma: float, dt: float):
    """Advance one SIR epidemic step of length dt.

    S, I, R -- susceptible / infected / recovered counts at this timestep
    beta    -- rate of transmission via contact
    gamma   -- rate of recovery from infection
    dt      -- next inter-event time

    Returns the updated (S, I, R) tuple.
    """
    population = S + I + R
    delta_infected = ((beta * S * I) / population) * dt
    delta_recovered = (gamma * I) * dt
    return (S - delta_infected,
            I + delta_infected - delta_recovered,
            R + delta_recovered)
|
def drive(start, end, step, parameters):
    """Step the SIR model over [start, end] in increments of `step`.

    `parameters` supplies the initial state and rates under the
    'P:sir.in.*' keys; the return value maps 'P:sir.out.S/I/R' to the
    per-step series and 'P:sir.in.dt' to the timestep indices.
    """
    step_results = {'P:sir.out.S': [], 'P:sir.out.I': [], 'P:sir.out.R': [], 'P:sir.in.dt': []}
    s = parameters['P:sir.in.S']
    i = parameters['P:sir.in.I']
    r = parameters['P:sir.in.R']
    # Bug fixes: the original passed the undefined names S/I/R to sir()
    # (NameError at runtime) and reused `i` as the loop variable, which
    # would have clobbered the infected count on every iteration.
    for t in range(start, end + 1, step):
        (s, i, r) = sir(s, i, r, parameters['P:sir.in.beta'], parameters['P:sir.in.gamma'], step)
        step_results['P:sir.out.S'].append(s)
        step_results['P:sir.out.I'].append(i)
        step_results['P:sir.out.R'].append(r)
        step_results['P:sir.in.dt'].append(t)
    return step_results
'\nDerived from the following:\n\n ********************************************************************************\n ! Input Variables:\n ! S Amount of susceptible members at the current timestep\n ! I Amount of infected members at the current timestep\n ! R Amount of recovered members at the current timestep\n ! beta Rate of transmission via contact\n ! gamma Rate of recovery from infection\n ! dt Next inter-event time\n !\n ! State Variables:\n ! infected Increase in infected at the current timestep\n ! recovered Increase in recovered at the current timestep\n ********************************************************************************\n subroutine sir(S, I, R, beta, gamma, dt)\n implicit none\n double precision S, I, R, beta, gamma, dt\n double precision infected, recovered\n\n infected = ((beta*S*I) / (S + I + R)) * dt\n recovered = (gamma*I) * dt\n\n S = S - infected\n I = I + infected - recovered\n R = R + recovered\n end subroutine sir\n'
def sir(S: float, I: float, R: float, beta: float, gamma: float, dt: float):
    """Advance one SIR epidemic step of length dt.

    S, I, R -- susceptible / infected / recovered counts at this timestep
    beta    -- rate of transmission via contact
    gamma   -- rate of recovery from infection
    dt      -- next inter-event time

    Returns the updated (S, I, R) tuple.
    """
    infected = beta * S * I / (S + I + R) * dt
    recovered = gamma * I * dt
    # Bug fix: the original stored the updated values in new lowercase
    # locals (s, i, r) but returned the *unchanged* inputs (S, I, R);
    # return the advanced state instead.
    S = S - infected
    I = I + infected - recovered
    R = R + recovered
    return (S, I, R)
|
class RequestParseError(Exception):
    """Error raised when the inbound request could not be parsed."""
    pass
class AttachmentTooLargeError(Exception):
    """Error raised when an attachment is too large.

    Attributes:
        email: the inbound email the attachment belonged to.
        filename: name of the offending attachment.
        size: the attachment's size (units assumed to be bytes -- confirm
            with callers).
    """
    def __init__(self, email, filename, size):
        # Bug fix: the original evaluated `super(AttachmentTooLargeError,
        # self)` without calling __init__(), so Exception.__init__ never
        # ran. Forward the same arguments so e.args stays (email,
        # filename, size) as before.
        super(AttachmentTooLargeError, self).__init__(email, filename, size)
        self.email = email
        self.filename = filename
        self.size = size
class AuthenticationError(Exception):
    """Error raised when the request is not authenticated."""
    pass
|
class Requestparseerror(Exception):
    """Error raised when the inbound request could not be parsed.

    NOTE(review): class-name casing differs from the RequestParseError
    spelling used by the parallel snippet in this file -- confirm which
    name callers expect.
    """
    pass
class Attachmenttoolargeerror(Exception):
    """Error raised when an attachment is too large.

    Attributes:
        email: the inbound email the attachment belonged to.
        filename: name of the offending attachment.
        size: the attachment's size (units assumed to be bytes -- confirm
            with callers).
    """
    def __init__(self, email, filename, size):
        # Bug fix: the original called super(AttachmentTooLargeError, self),
        # naming a *different* class -- a TypeError (or NameError when that
        # class is absent) at construction time -- and never invoked
        # __init__(). Use this class and forward the arguments.
        super(Attachmenttoolargeerror, self).__init__(email, filename, size)
        self.email = email
        self.filename = filename
        self.size = size
class Authenticationerror(Exception):
    """Error raised when the request is not authenticated.

    NOTE(review): class-name casing differs from the AuthenticationError
    spelling used by the parallel snippet in this file -- confirm which
    name callers expect.
    """
    pass
|
# Configuration file for interface "rpc". This interface is
# used in conjunction with RPC resource for cage-to-cage RPC calls.
#
# If location discovery at runtime is used (which is recommended),
# then all the cages that wish to share the same RPC "namespace" need
# identical broadcast ports, broadcast addresses that face the same
# subnet and the same flock_id, which is an arbitrary identifier around
# which all the related cages are grouped, same port broadcasts with
# different flock id will be ignored.
#
# The RPC listener is bound to a random port in specified range,
# which is later advertised at runtime to other cages. In case
# such broadcast advertisement are forbidden an exact port number
# can be specified, as a positive number (vs. negative for range).
# In this case other cages will likely have an entry in
# config_resource_rpc.py exact_locations parameter specifying this
# cage's address.
#
# There is no need to make a copy of this file for each cage,
# but you may need to modify the broadcast_address parameter
# if your OS doesn't work with 255.255.255.255 broadcasts,
# for example, under FreeBSD change it to something like
# "192.168.0.1/192.168.255.255".
# NOTE(review): `pmnc` is not imported here; it is presumably injected into
# this config module's namespace by the hosting framework at load time --
# confirm before relying on it.
config = dict \
(
protocol = "rpc",                                       # meta
random_port = -63000,                                   # tcp, negative means "in range 63000..63999"
max_connections = 100,                                  # tcp
broadcast_address = ("0.0.0.0/255.255.255.255", 12480), # rpc, "interface address/broadcast address", port
ssl_ciphers = None,                                     # ssl, optional str
ssl_protocol = None,                                    # ssl, optional "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" or "TLS"
flock_id = "DEFAULT",                                   # rpc
marshaling_methods = ("msgpack", "pickle"),             # rpc, allowed marshaling methods
max_packet_size = 1048576,                              # rpc, maximum allowed request/response size in bytes
)
# DO NOT TOUCH BELOW THIS LINE
__all__ = [ "get", "copy" ]
# Thin accessors delegating to the framework's config helpers.
get = lambda key, default = None: pmnc.config.get_(config, {}, key, default)
copy = lambda: pmnc.config.copy_(config, {})
# EOF
|
# RPC interface configuration (single-statement form) plus the standard
# config-file accessors. NOTE(review): `pmnc` is not imported in this chunk;
# presumably the hosting framework provides it at load time -- confirm.
config = dict(protocol='rpc', random_port=-63000, max_connections=100, broadcast_address=('0.0.0.0/255.255.255.255', 12480), ssl_ciphers=None, ssl_protocol=None, flock_id='DEFAULT', marshaling_methods=('msgpack', 'pickle'), max_packet_size=1048576)
__all__ = ['get', 'copy']
get = lambda key, default=None: pmnc.config.get_(config, {}, key, default)
copy = lambda : pmnc.config.copy_(config, {})
|
# Sample tweets -- presumably a fixture for exercising a tweet classifier;
# confirm usage at the call sites. Texts are kept verbatim (including the
# embedded \n escapes, '^' characters and the \u2026 ellipsis).
EXAMPLE_TWEETS = [
    "Trump for President!!! #MAGA",
    "Trump is the best ever!",
    "RT @someuser: Trump is, by far, the best POTUS in history. \n\nBonus: He^s friggin^ awesome!\n\nTrump gave Pelosi and the Dems the ultimate\u2026 ",
    "If Clinton is elected, I'm moving to Canada",
    "Trump is doing a great job so far. Keep it up man.",
    "He is awesome, make american great again. Democrats is taking off. We love democrats.",
    "This tweet is about basketball, and I'm watching tonight on CBS Sports",
    "Hillary for President!!! #StrongerTogether",
    "Trump is the worst ever!",
    "If Trump is elected, I'm moving to Canada",
    "RT @MotherJones: A scientist who resisted Trump administration censorship of climate report just lost her job",
    "Trump is doing a terrible job so far. Vote him out ASAP."
]
|
# Sample tweets (single-line list-literal form) -- presumably a classifier
# fixture; confirm usage at the call sites.
example_tweets = ['Trump for President!!! #MAGA', 'Trump is the best ever!', 'RT @someuser: Trump is, by far, the best POTUS in history. \n\nBonus: He^s friggin^ awesome!\n\nTrump gave Pelosi and the Dems the ultimate… ', "If Clinton is elected, I'm moving to Canada", 'Trump is doing a great job so far. Keep it up man.', 'He is awesome, make american great again. Democrats is taking off. We love democrats.', "This tweet is about basketball, and I'm watching tonight on CBS Sports", 'Hillary for President!!! #StrongerTogether', 'Trump is the worst ever!', "If Trump is elected, I'm moving to Canada", 'RT @MotherJones: A scientist who resisted Trump administration censorship of climate report just lost her job', 'Trump is doing a terrible job so far. Vote him out ASAP.']
|
"""
1. Use for position param, variable params, keyword argument
"""
def test(a, b, *args, m=1, n=2):
print(a)
print(b)
print(args)
print(m)
print(n)
test(1, 2, 3, 4, 5)
print()
test(1, 2, 3, 4, 5, m=10, n=20)
print()
"""
1. Use **kwargs for dict on keyword arguments
"""
def foo(**kwargs):
    """Print the keyword arguments captured as a dict."""
    print(kwargs)
foo(a=10, b=20)
print()
def test2(a, b, *args, m=1, n=2, **kwargs):
print(a)
print(b)
print(args)
print(m)
print(n)
print(kwargs)
###############################################
# 1. position args, *args, keyword args, dict args
test2(1, 2, 3, 4, 5, m=10, n=20, x=100, y=200)
print()
def test3(m, **kwargs):
    """Print the positional argument, then the captured keyword dict."""
    print(m)
    print(kwargs)
test3(2, x=12, y=24)
|
"""
1. Use for position param, variable params, keyword argument
"""
def test(a, b, *args, m=1, n=2):
    """Demo of positional, *args and keyword-only parameters: print each."""
    print(a)
    print(b)
    print(args)  # extra positional arguments, as a tuple
    print(m)
    print(n)
test(1, 2, 3, 4, 5)
print()
test(1, 2, 3, 4, 5, m=10, n=20)
print()
'\n 1. Use **kwargs for dict on keyword arguments\n'
def foo(**kwargs):
    """Print the keyword arguments captured as a dict."""
    print(kwargs)
foo(a=10, b=20)
print()
def test2(a, b, *args, m=1, n=2, **kwargs):
    """Demo combining every parameter kind: print each value group."""
    print(a)
    print(b)
    print(args)    # extra positional arguments, as a tuple
    print(m)
    print(n)
    print(kwargs)  # remaining keyword arguments, as a dict
test2(1, 2, 3, 4, 5, m=10, n=20, x=100, y=200)
print()
def test3(m, **kwargs):
    """Print the positional argument, then the captured keyword dict."""
    print(m)
    print(kwargs)
test3(2, x=12, y=24)
|
# Fixture: a STAC (SpatioTemporal Asset Catalog) 1.0.0 Item -- a GeoJSON
# 'Feature' with bbox/polygon geometry, a single datetime property,
# collection/root/parent links and two assets (visual COG + thumbnail).
test_item = {
    "stac_version": "1.0.0",
    "stac_extensions": [],
    "type": "Feature",
    "id": "20201211_223832_CS2",
    "bbox": [
        172.91173669923782,
        1.3438851951615003,
        172.95469614953714,
        1.3690476620161975
    ],
    "geometry": {
        "type": "Polygon",
        "coordinates": [
            [
                [
                    172.91173669923782,
                    1.3438851951615003
                ],
                [
                    172.95469614953714,
                    1.3438851951615003
                ],
                [
                    172.95469614953714,
                    1.3690476620161975
                ],
                [
                    172.91173669923782,
                    1.3690476620161975
                ],
                [
                    172.91173669923782,
                    1.3438851951615003
                ]
            ]
        ]
    },
    "properties": {
        "datetime": "2020-12-11T22:38:32.125000Z"
    },
    "collection": "simple-collection",
    "links": [
        {
            "rel": "collection",
            "href": "./collection.json",
            "type": "application/json",
            "title": "Simple Example Collection"
        },
        {
            "rel": "root",
            "href": "./collection.json",
            "type": "application/json",
            "title": "Simple Example Collection"
        },
        {
            "rel": "parent",
            "href": "./collection.json",
            "type": "application/json",
            "title": "Simple Example Collection"
        }
    ],
    "assets": {
        "visual": {
            "href": "https://storage.googleapis.com/open-cogs/stac-examples/20201211_223832_CS2.tif",
            "type": "image/tiff; application=geotiff; profile=cloud-optimized",
            "title": "3-Band Visual",
            "roles": [
                "visual"
            ]
        },
        "thumbnail": {
            "href": "https://storage.googleapis.com/open-cogs/stac-examples/20201211_223832_CS2.jpg",
            "title": "Thumbnail",
            "type": "image/jpeg",
            "roles": [
                "thumbnail"
            ]
        }
    }
}
|
# Fixture: STAC 1.0.0 Item (single-line dict-literal form) -- same content
# as the multi-line variant elsewhere in this file.
test_item = {'stac_version': '1.0.0', 'stac_extensions': [], 'type': 'Feature', 'id': '20201211_223832_CS2', 'bbox': [172.91173669923782, 1.3438851951615003, 172.95469614953714, 1.3690476620161975], 'geometry': {'type': 'Polygon', 'coordinates': [[[172.91173669923782, 1.3438851951615003], [172.95469614953714, 1.3438851951615003], [172.95469614953714, 1.3690476620161975], [172.91173669923782, 1.3690476620161975], [172.91173669923782, 1.3438851951615003]]]}, 'properties': {'datetime': '2020-12-11T22:38:32.125000Z'}, 'collection': 'simple-collection', 'links': [{'rel': 'collection', 'href': './collection.json', 'type': 'application/json', 'title': 'Simple Example Collection'}, {'rel': 'root', 'href': './collection.json', 'type': 'application/json', 'title': 'Simple Example Collection'}, {'rel': 'parent', 'href': './collection.json', 'type': 'application/json', 'title': 'Simple Example Collection'}], 'assets': {'visual': {'href': 'https://storage.googleapis.com/open-cogs/stac-examples/20201211_223832_CS2.tif', 'type': 'image/tiff; application=geotiff; profile=cloud-optimized', 'title': '3-Band Visual', 'roles': ['visual']}, 'thumbnail': {'href': 'https://storage.googleapis.com/open-cogs/stac-examples/20201211_223832_CS2.jpg', 'title': 'Thumbnail', 'type': 'image/jpeg', 'roles': ['thumbnail']}}}
|
# output: ok
# Smoke tests for list-comprehension semantics; prints 'ok' if all pass.
assert([x for x in ()] == [])
assert([x for x in range(0, 3)] == [0, 1, 2])
# Nested loops iterate in row-major order: x varies slowest.
assert([(x, y) for x in range(0, 2) for y in range(2, 4)] ==
[(0, 2), (0, 3), (1, 2), (1, 3)])
assert([x for x in range(0, 3) if x >= 1] == [1, 2])
def inc(x):
    """Return x incremented by one."""
    return x + 1
assert([inc(y) for y in (1, 2, 3)] == [2, 3, 4])
a = 1
# The comprehension can read enclosing-scope names (a stays 1 throughout).
assert([a for y in (1, 2, 3)] == [1, 1, 1])
assert([(lambda x: x * 2)(y) for y in (1, 2, 3)] == [2, 4, 6])
# This lambda ignores its parameter and closes over the loop variable y;
# since it is called immediately inside each iteration, the result is the
# same as the previous line.
assert([(lambda x: y * 2)(y) for y in (1, 2, 3)] == [2, 4, 6])
print('ok')
|
# Smoke tests for list-comprehension semantics; prints 'ok' if all pass.
assert [x for x in ()] == []
assert [x for x in range(0, 3)] == [0, 1, 2]
# Nested loops iterate in row-major order: x varies slowest.
assert [(x, y) for x in range(0, 2) for y in range(2, 4)] == [(0, 2), (0, 3), (1, 2), (1, 3)]
assert [x for x in range(0, 3) if x >= 1] == [1, 2]
def inc(x):
    """Return x incremented by one."""
    return x + 1
assert [inc(y) for y in (1, 2, 3)] == [2, 3, 4]
a = 1
# The comprehension can read enclosing-scope names (a stays 1 throughout).
assert [a for y in (1, 2, 3)] == [1, 1, 1]
assert [(lambda x: x * 2)(y) for y in (1, 2, 3)] == [2, 4, 6]
# This lambda ignores its parameter and closes over the loop variable y;
# called immediately inside each iteration, so the result matches above.
assert [(lambda x: y * 2)(y) for y in (1, 2, 3)] == [2, 4, 6]
print('ok')
|
#
# PySNMP MIB module ASCEND-MIBVDSLNET-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBVDSLNET-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:28:54 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter32, Counter64, Integer32, Bits, TimeTicks, ObjectIdentity, Unsigned32, iso, NotificationType, MibIdentifier, ModuleIdentity, Gauge32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Counter64", "Integer32", "Bits", "TimeTicks", "ObjectIdentity", "Unsigned32", "iso", "NotificationType", "MibIdentifier", "ModuleIdentity", "Gauge32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class DisplayString(OctetString):
    # Shadows the DisplayString imported from SNMPv2-TC earlier in this
    # module: the pysmi generator (see file header) re-declares it locally
    # as a bare OctetString subtype with no extra constraints.
    pass
mibvdslNetworkProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 9))
mibvdslNetworkProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 9, 1), )
if mibBuilder.loadTexts: mibvdslNetworkProfileTable.setStatus('mandatory')
if mibBuilder.loadTexts: mibvdslNetworkProfileTable.setDescription('A list of mibvdslNetworkProfile profile entries.')
mibvdslNetworkProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1), ).setIndexNames((0, "ASCEND-MIBVDSLNET-MIB", "vdslNetworkProfile-Shelf-o"), (0, "ASCEND-MIBVDSLNET-MIB", "vdslNetworkProfile-Slot-o"), (0, "ASCEND-MIBVDSLNET-MIB", "vdslNetworkProfile-Item-o"))
if mibBuilder.loadTexts: mibvdslNetworkProfileEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mibvdslNetworkProfileEntry.setDescription('A mibvdslNetworkProfile entry containing objects that maps to the parameters of mibvdslNetworkProfile profile.')
vdslNetworkProfile_Shelf_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 1), Integer32()).setLabel("vdslNetworkProfile-Shelf-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslNetworkProfile_Shelf_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Shelf_o.setDescription('')
vdslNetworkProfile_Slot_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 2), Integer32()).setLabel("vdslNetworkProfile-Slot-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslNetworkProfile_Slot_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Slot_o.setDescription('')
vdslNetworkProfile_Item_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 3), Integer32()).setLabel("vdslNetworkProfile-Item-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslNetworkProfile_Item_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Item_o.setDescription('')
vdslNetworkProfile_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 4), DisplayString()).setLabel("vdslNetworkProfile-Name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_Name.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Name.setDescription('For future use. The current design does not use the name field but instead references Vdsl lines by the physical address; we may in the future support referencing Vdsl lines by name as well as by address. The name consists of a null terminated ascii string supplied by the user; it defaults to the ascii form of the Vdsl line physical address.')
vdslNetworkProfile_PhysicalAddress_Shelf = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("anyShelf", 1), ("shelf1", 2), ("shelf2", 3), ("shelf3", 4), ("shelf4", 5), ("shelf5", 6), ("shelf6", 7), ("shelf7", 8), ("shelf8", 9), ("shelf9", 10)))).setLabel("vdslNetworkProfile-PhysicalAddress-Shelf").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_Shelf.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_Shelf.setDescription('The number of the shelf that the addressed physical device resides on.')
vdslNetworkProfile_PhysicalAddress_Slot = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 55, 56, 57, 58, 49, 50, 42, 53, 54, 45, 46, 51, 59))).clone(namedValues=NamedValues(("anySlot", 1), ("slot1", 2), ("slot2", 3), ("slot3", 4), ("slot4", 5), ("slot5", 6), ("slot6", 7), ("slot7", 8), ("slot8", 9), ("slot9", 10), ("slot10", 11), ("slot11", 12), ("slot12", 13), ("slot13", 14), ("slot14", 15), ("slot15", 16), ("slot16", 17), ("slot17", 18), ("slot18", 19), ("slot19", 20), ("slot20", 21), ("slot21", 22), ("slot22", 23), ("slot23", 24), ("slot24", 25), ("slot25", 26), ("slot26", 27), ("slot27", 28), ("slot28", 29), ("slot29", 30), ("slot30", 31), ("slot31", 32), ("slot32", 33), ("slot33", 34), ("slot34", 35), ("slot35", 36), ("slot36", 37), ("slot37", 38), ("slot38", 39), ("slot39", 40), ("slot40", 41), ("aLim", 55), ("bLim", 56), ("cLim", 57), ("dLim", 58), ("leftController", 49), ("rightController", 50), ("controller", 42), ("firstControlModule", 53), ("secondControlModule", 54), ("trunkModule1", 45), ("trunkModule2", 46), ("controlModule", 51), ("slotPrimary", 59)))).setLabel("vdslNetworkProfile-PhysicalAddress-Slot").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_Slot.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_Slot.setDescription('The number of the slot that the addressed physical device resides on.')
vdslNetworkProfile_PhysicalAddress_ItemNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 13), Integer32()).setLabel("vdslNetworkProfile-PhysicalAddress-ItemNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_ItemNumber.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_ItemNumber.setDescription('A number that specifies an addressable entity within the context of shelf and slot.')
vdslNetworkProfile_Enabled = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("vdslNetworkProfile-Enabled").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_Enabled.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Enabled.setDescription('TRUE if the line is enabled, otherwise FALSE.')
vdslNetworkProfile_SparingMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inactive", 1), ("manual", 2), ("automatic", 3)))).setLabel("vdslNetworkProfile-SparingMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_SparingMode.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_SparingMode.setDescription('Port sparing operational mode for this port.')
vdslNetworkProfile_IgnoreLineup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("systemDefined", 1), ("no", 2), ("yes", 3)))).setLabel("vdslNetworkProfile-IgnoreLineup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_IgnoreLineup.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_IgnoreLineup.setDescription('Ignore line up value for this port.')
vdslNetworkProfile_LineConfig_NailedGroup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 19), Integer32()).setLabel("vdslNetworkProfile-LineConfig-NailedGroup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_NailedGroup.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_NailedGroup.setDescription('A number that identifies the this unique physical DSL line.')
vdslNetworkProfile_LineConfig_VpSwitchingVpi = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 20), Integer32()).setLabel("vdslNetworkProfile-LineConfig-VpSwitchingVpi").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_VpSwitchingVpi.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_VpSwitchingVpi.setDescription('The Vpi to be used for the VP switching. Rest of the VPIs within valid vpi-vci-range will be used for the VC switching. Changes in this range will take effect immediately. THE USER SHOULD BE VERY CAREFUL WHILE CHANGING THIS VALUE BECAUSE ALL CONNECTIONS ON THE LIM WHERE THIS PORT BELONGS WILL BE DROPPED IN ORDER TO MAKE THIS NEW VALUE EFFECTIVE IMMEDIATELY.')
vdslNetworkProfile_LineConfig_UpStreamFixedRate = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("n-1206667", 1), ("n-965333", 2), ("n-1930667", 3), ("n-3861333", 4)))).setLabel("vdslNetworkProfile-LineConfig-UpStreamFixedRate").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_UpStreamFixedRate.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_UpStreamFixedRate.setDescription('The following Up/Down stream rate relationships are supported: (0.965Mbps/19.306Mbps); (1.930Mbps/11.463Mbps); (3.861Mbps/11.463Mbps); (3.861Mbps/15.626Mbps). Up Stream range: 0.965Mbps - 3.861Mbps.')
vdslNetworkProfile_LineConfig_DownStreamFixedRate = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("n-1206667", 1), ("n-11463333", 2), ("n-15626333", 3), ("n-19306667", 4)))).setLabel("vdslNetworkProfile-LineConfig-DownStreamFixedRate").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_DownStreamFixedRate.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_DownStreamFixedRate.setDescription('The following Up/Down stream rate relationships are supported: (0.965Mbps/19.306Mbps); (1.930Mbps/11.463Mbps); (3.861Mbps/11.463Mbps); (3.861Mbps/15.626Mbps). Down Stream range: 11.463Mbps - 15.626Mbps.')
vdslNetworkProfile_LineConfig_ConfigLoopback = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disable", 1), ("digital", 2), ("analog", 3)))).setLabel("vdslNetworkProfile-LineConfig-ConfigLoopback").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_ConfigLoopback.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_ConfigLoopback.setDescription('Configuration of different modem loopbacks.')
vdslNetworkProfile_LineConfig_PsdValue = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("n-53dbm", 1), ("n-60dbm", 2)))).setLabel("vdslNetworkProfile-LineConfig-PsdValue").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_PsdValue.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_PsdValue.setDescription('Configuration of PSD parameter. It defines the power that is allowed to be sent to the line.')
vdslNetworkProfile_LineConfig_LinkStatecmd = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(16, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("autoConnectCmd", 16), ("disconnectState", 1), ("connectState", 2), ("quietState", 3), ("idleReqState", 4), ("backToServState", 5), ("changeIdleParamState", 6), ("changeWarmStartParamState", 7), ("changeCurrentParamState", 8)))).setLabel("vdslNetworkProfile-LineConfig-LinkStatecmd").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_LinkStatecmd.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_LinkStatecmd.setDescription('Sets the link connect state. Use this to control status of the VDSL link connect state machine. The auto-connect-cmd will train modem up to the final service. All the other commands are used to manualy operate the VDSL link connect state machine.')
vdslNetworkProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("vdslNetworkProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_Action_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Action_o.setDescription('')
mibBuilder.exportSymbols("ASCEND-MIBVDSLNET-MIB", vdslNetworkProfile_Slot_o=vdslNetworkProfile_Slot_o, vdslNetworkProfile_Name=vdslNetworkProfile_Name, vdslNetworkProfile_LineConfig_LinkStatecmd=vdslNetworkProfile_LineConfig_LinkStatecmd, vdslNetworkProfile_LineConfig_VpSwitchingVpi=vdslNetworkProfile_LineConfig_VpSwitchingVpi, vdslNetworkProfile_PhysicalAddress_Slot=vdslNetworkProfile_PhysicalAddress_Slot, vdslNetworkProfile_PhysicalAddress_Shelf=vdslNetworkProfile_PhysicalAddress_Shelf, mibvdslNetworkProfileTable=mibvdslNetworkProfileTable, vdslNetworkProfile_IgnoreLineup=vdslNetworkProfile_IgnoreLineup, vdslNetworkProfile_SparingMode=vdslNetworkProfile_SparingMode, vdslNetworkProfile_PhysicalAddress_ItemNumber=vdslNetworkProfile_PhysicalAddress_ItemNumber, vdslNetworkProfile_LineConfig_PsdValue=vdslNetworkProfile_LineConfig_PsdValue, vdslNetworkProfile_LineConfig_DownStreamFixedRate=vdslNetworkProfile_LineConfig_DownStreamFixedRate, vdslNetworkProfile_Enabled=vdslNetworkProfile_Enabled, vdslNetworkProfile_LineConfig_NailedGroup=vdslNetworkProfile_LineConfig_NailedGroup, DisplayString=DisplayString, vdslNetworkProfile_Action_o=vdslNetworkProfile_Action_o, vdslNetworkProfile_Shelf_o=vdslNetworkProfile_Shelf_o, mibvdslNetworkProfile=mibvdslNetworkProfile, mibvdslNetworkProfileEntry=mibvdslNetworkProfileEntry, vdslNetworkProfile_Item_o=vdslNetworkProfile_Item_o, vdslNetworkProfile_LineConfig_UpStreamFixedRate=vdslNetworkProfile_LineConfig_UpStreamFixedRate, vdslNetworkProfile_LineConfig_ConfigLoopback=vdslNetworkProfile_LineConfig_ConfigLoopback)
|
(configuration,) = mibBuilder.importSymbols('ASCEND-MIB', 'configuration')
(octet_string, integer, object_identifier) = mibBuilder.importSymbols('ASN1', 'OctetString', 'Integer', 'ObjectIdentifier')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(single_value_constraint, constraints_union, value_size_constraint, value_range_constraint, constraints_intersection) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'SingleValueConstraint', 'ConstraintsUnion', 'ValueSizeConstraint', 'ValueRangeConstraint', 'ConstraintsIntersection')
(module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup')
(counter32, counter64, integer32, bits, time_ticks, object_identity, unsigned32, iso, notification_type, mib_identifier, module_identity, gauge32, ip_address, mib_scalar, mib_table, mib_table_row, mib_table_column) = mibBuilder.importSymbols('SNMPv2-SMI', 'Counter32', 'Counter64', 'Integer32', 'Bits', 'TimeTicks', 'ObjectIdentity', 'Unsigned32', 'iso', 'NotificationType', 'MibIdentifier', 'ModuleIdentity', 'Gauge32', 'IpAddress', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn')
(textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'DisplayString')
class Displaystring(OctetString):
    # pysnmp-generated alias for the SNMP DisplayString textual convention:
    # an OCTET STRING with no additional constraints.
    pass
mibvdsl_network_profile = mib_identifier((1, 3, 6, 1, 4, 1, 529, 23, 9))
mibvdsl_network_profile_table = mib_table((1, 3, 6, 1, 4, 1, 529, 23, 9, 1))
if mibBuilder.loadTexts:
mibvdslNetworkProfileTable.setStatus('mandatory')
if mibBuilder.loadTexts:
mibvdslNetworkProfileTable.setDescription('A list of mibvdslNetworkProfile profile entries.')
mibvdsl_network_profile_entry = mib_table_row((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1)).setIndexNames((0, 'ASCEND-MIBVDSLNET-MIB', 'vdslNetworkProfile-Shelf-o'), (0, 'ASCEND-MIBVDSLNET-MIB', 'vdslNetworkProfile-Slot-o'), (0, 'ASCEND-MIBVDSLNET-MIB', 'vdslNetworkProfile-Item-o'))
if mibBuilder.loadTexts:
mibvdslNetworkProfileEntry.setStatus('mandatory')
if mibBuilder.loadTexts:
mibvdslNetworkProfileEntry.setDescription('A mibvdslNetworkProfile entry containing objects that maps to the parameters of mibvdslNetworkProfile profile.')
vdsl_network_profile__shelf_o = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 1), integer32()).setLabel('vdslNetworkProfile-Shelf-o').setMaxAccess('readonly')
if mibBuilder.loadTexts:
vdslNetworkProfile_Shelf_o.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_Shelf_o.setDescription('')
vdsl_network_profile__slot_o = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 2), integer32()).setLabel('vdslNetworkProfile-Slot-o').setMaxAccess('readonly')
if mibBuilder.loadTexts:
vdslNetworkProfile_Slot_o.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_Slot_o.setDescription('')
vdsl_network_profile__item_o = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 3), integer32()).setLabel('vdslNetworkProfile-Item-o').setMaxAccess('readonly')
if mibBuilder.loadTexts:
vdslNetworkProfile_Item_o.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_Item_o.setDescription('')
vdsl_network_profile__name = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 4), display_string()).setLabel('vdslNetworkProfile-Name').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_Name.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_Name.setDescription('For future use. The current design does not use the name field but instead references Vdsl lines by the physical address; we may in the future support referencing Vdsl lines by name as well as by address. The name consists of a null terminated ascii string supplied by the user; it defaults to the ascii form of the Vdsl line physical address.')
vdsl_network_profile__physical_address__shelf = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 11), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=named_values(('anyShelf', 1), ('shelf1', 2), ('shelf2', 3), ('shelf3', 4), ('shelf4', 5), ('shelf5', 6), ('shelf6', 7), ('shelf7', 8), ('shelf8', 9), ('shelf9', 10)))).setLabel('vdslNetworkProfile-PhysicalAddress-Shelf').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_PhysicalAddress_Shelf.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_PhysicalAddress_Shelf.setDescription('The number of the shelf that the addressed physical device resides on.')
vdsl_network_profile__physical_address__slot = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 12), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 55, 56, 57, 58, 49, 50, 42, 53, 54, 45, 46, 51, 59))).clone(namedValues=named_values(('anySlot', 1), ('slot1', 2), ('slot2', 3), ('slot3', 4), ('slot4', 5), ('slot5', 6), ('slot6', 7), ('slot7', 8), ('slot8', 9), ('slot9', 10), ('slot10', 11), ('slot11', 12), ('slot12', 13), ('slot13', 14), ('slot14', 15), ('slot15', 16), ('slot16', 17), ('slot17', 18), ('slot18', 19), ('slot19', 20), ('slot20', 21), ('slot21', 22), ('slot22', 23), ('slot23', 24), ('slot24', 25), ('slot25', 26), ('slot26', 27), ('slot27', 28), ('slot28', 29), ('slot29', 30), ('slot30', 31), ('slot31', 32), ('slot32', 33), ('slot33', 34), ('slot34', 35), ('slot35', 36), ('slot36', 37), ('slot37', 38), ('slot38', 39), ('slot39', 40), ('slot40', 41), ('aLim', 55), ('bLim', 56), ('cLim', 57), ('dLim', 58), ('leftController', 49), ('rightController', 50), ('controller', 42), ('firstControlModule', 53), ('secondControlModule', 54), ('trunkModule1', 45), ('trunkModule2', 46), ('controlModule', 51), ('slotPrimary', 59)))).setLabel('vdslNetworkProfile-PhysicalAddress-Slot').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_PhysicalAddress_Slot.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_PhysicalAddress_Slot.setDescription('The number of the slot that the addressed physical device resides on.')
vdsl_network_profile__physical_address__item_number = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 13), integer32()).setLabel('vdslNetworkProfile-PhysicalAddress-ItemNumber').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_PhysicalAddress_ItemNumber.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_PhysicalAddress_ItemNumber.setDescription('A number that specifies an addressable entity within the context of shelf and slot.')
vdsl_network_profile__enabled = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 16), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('no', 1), ('yes', 2)))).setLabel('vdslNetworkProfile-Enabled').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_Enabled.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_Enabled.setDescription('TRUE if the line is enabled, otherwise FALSE.')
vdsl_network_profile__sparing_mode = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 17), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3))).clone(namedValues=named_values(('inactive', 1), ('manual', 2), ('automatic', 3)))).setLabel('vdslNetworkProfile-SparingMode').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_SparingMode.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_SparingMode.setDescription('Port sparing operational mode for this port.')
vdsl_network_profile__ignore_lineup = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 18), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3))).clone(namedValues=named_values(('systemDefined', 1), ('no', 2), ('yes', 3)))).setLabel('vdslNetworkProfile-IgnoreLineup').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_IgnoreLineup.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_IgnoreLineup.setDescription('Ignore line up value for this port.')
vdsl_network_profile__line_config__nailed_group = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 19), integer32()).setLabel('vdslNetworkProfile-LineConfig-NailedGroup').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_NailedGroup.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_NailedGroup.setDescription('A number that identifies the this unique physical DSL line.')
vdsl_network_profile__line_config__vp_switching_vpi = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 20), integer32()).setLabel('vdslNetworkProfile-LineConfig-VpSwitchingVpi').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_VpSwitchingVpi.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_VpSwitchingVpi.setDescription('The Vpi to be used for the VP switching. Rest of the VPIs within valid vpi-vci-range will be used for the VC switching. Changes in this range will take effect immediately. THE USER SHOULD BE VERY CAREFUL WHILE CHANGING THIS VALUE BECAUSE ALL CONNECTIONS ON THE LIM WHERE THIS PORT BELONGS WILL BE DROPPED IN ORDER TO MAKE THIS NEW VALUE EFFECTIVE IMMEDIATELY.')
vdsl_network_profile__line_config__up_stream_fixed_rate = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 22), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4))).clone(namedValues=named_values(('n-1206667', 1), ('n-965333', 2), ('n-1930667', 3), ('n-3861333', 4)))).setLabel('vdslNetworkProfile-LineConfig-UpStreamFixedRate').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_UpStreamFixedRate.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_UpStreamFixedRate.setDescription('The following Up/Down stream rate relationships are supported: (0.965Mbps/19.306Mbps); (1.930Mbps/11.463Mbps); (3.861Mbps/11.463Mbps); (3.861Mbps/15.626Mbps). Up Stream range: 0.965Mbps - 3.861Mbps.')
vdsl_network_profile__line_config__down_stream_fixed_rate = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 23), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4))).clone(namedValues=named_values(('n-1206667', 1), ('n-11463333', 2), ('n-15626333', 3), ('n-19306667', 4)))).setLabel('vdslNetworkProfile-LineConfig-DownStreamFixedRate').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_DownStreamFixedRate.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_DownStreamFixedRate.setDescription('The following Up/Down stream rate relationships are supported: (0.965Mbps/19.306Mbps); (1.930Mbps/11.463Mbps); (3.861Mbps/11.463Mbps); (3.861Mbps/15.626Mbps). Down Stream range: 11.463Mbps - 15.626Mbps.')
vdsl_network_profile__line_config__config_loopback = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 24), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3))).clone(namedValues=named_values(('disable', 1), ('digital', 2), ('analog', 3)))).setLabel('vdslNetworkProfile-LineConfig-ConfigLoopback').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_ConfigLoopback.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_ConfigLoopback.setDescription('Configuration of different modem loopbacks.')
vdsl_network_profile__line_config__psd_value = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 25), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('n-53dbm', 1), ('n-60dbm', 2)))).setLabel('vdslNetworkProfile-LineConfig-PsdValue').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_PsdValue.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_PsdValue.setDescription('Configuration of PSD parameter. It defines the power that is allowed to be sent to the line.')
vdsl_network_profile__line_config__link_statecmd = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 26), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(16, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=named_values(('autoConnectCmd', 16), ('disconnectState', 1), ('connectState', 2), ('quietState', 3), ('idleReqState', 4), ('backToServState', 5), ('changeIdleParamState', 6), ('changeWarmStartParamState', 7), ('changeCurrentParamState', 8)))).setLabel('vdslNetworkProfile-LineConfig-LinkStatecmd').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_LinkStatecmd.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_LineConfig_LinkStatecmd.setDescription('Sets the link connect state. Use this to control status of the VDSL link connect state machine. The auto-connect-cmd will train modem up to the final service. All the other commands are used to manualy operate the VDSL link connect state machine.')
vdsl_network_profile__action_o = mib_scalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 14), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3))).clone(namedValues=named_values(('noAction', 1), ('createProfile', 2), ('deleteProfile', 3)))).setLabel('vdslNetworkProfile-Action-o').setMaxAccess('readwrite')
if mibBuilder.loadTexts:
vdslNetworkProfile_Action_o.setStatus('mandatory')
if mibBuilder.loadTexts:
vdslNetworkProfile_Action_o.setDescription('')
mibBuilder.exportSymbols('ASCEND-MIBVDSLNET-MIB', vdslNetworkProfile_Slot_o=vdslNetworkProfile_Slot_o, vdslNetworkProfile_Name=vdslNetworkProfile_Name, vdslNetworkProfile_LineConfig_LinkStatecmd=vdslNetworkProfile_LineConfig_LinkStatecmd, vdslNetworkProfile_LineConfig_VpSwitchingVpi=vdslNetworkProfile_LineConfig_VpSwitchingVpi, vdslNetworkProfile_PhysicalAddress_Slot=vdslNetworkProfile_PhysicalAddress_Slot, vdslNetworkProfile_PhysicalAddress_Shelf=vdslNetworkProfile_PhysicalAddress_Shelf, mibvdslNetworkProfileTable=mibvdslNetworkProfileTable, vdslNetworkProfile_IgnoreLineup=vdslNetworkProfile_IgnoreLineup, vdslNetworkProfile_SparingMode=vdslNetworkProfile_SparingMode, vdslNetworkProfile_PhysicalAddress_ItemNumber=vdslNetworkProfile_PhysicalAddress_ItemNumber, vdslNetworkProfile_LineConfig_PsdValue=vdslNetworkProfile_LineConfig_PsdValue, vdslNetworkProfile_LineConfig_DownStreamFixedRate=vdslNetworkProfile_LineConfig_DownStreamFixedRate, vdslNetworkProfile_Enabled=vdslNetworkProfile_Enabled, vdslNetworkProfile_LineConfig_NailedGroup=vdslNetworkProfile_LineConfig_NailedGroup, DisplayString=DisplayString, vdslNetworkProfile_Action_o=vdslNetworkProfile_Action_o, vdslNetworkProfile_Shelf_o=vdslNetworkProfile_Shelf_o, mibvdslNetworkProfile=mibvdslNetworkProfile, mibvdslNetworkProfileEntry=mibvdslNetworkProfileEntry, vdslNetworkProfile_Item_o=vdslNetworkProfile_Item_o, vdslNetworkProfile_LineConfig_UpStreamFixedRate=vdslNetworkProfile_LineConfig_UpStreamFixedRate, vdslNetworkProfile_LineConfig_ConfigLoopback=vdslNetworkProfile_LineConfig_ConfigLoopback)
|
    def __init__(self):
        # Booked intervals stored as [start, end) pairs, in insertion order.
        self.meetings = []
    def book(self, start: int, end: int) -> bool:
        # Linear scan: reject the new interval if it overlaps any booking.
        for s, e in self.meetings:
            # Half-open intervals overlap iff each starts before the other
            # ends; touching endpoints (end == s) are allowed.
            if s < end and start < e:
                return False
        self.meetings.append([start, end])
        return True
|
    def __init__(self):
        # Booked intervals stored as [start, end) pairs, in insertion order.
        self.meetings = []
    def book(self, start: int, end: int) -> bool:
        # O(n) overlap check against every existing booking.
        for (s, e) in self.meetings:
            # Half-open intervals overlap iff each starts before the other ends.
            if s < end and start < e:
                return False
        self.meetings.append([start, end])
        return True
|
# Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.
#
# The number of elements initialized in nums1 and nums2 are m and n respectively.
# You may assume that nums1 has enough space (size that is equal to m + n) to hold additional elements from nums2.
# Source - https://leetcode.com/problems/merge-sorted-array/
# 2 pointers approach
class Solution:
    def merge(self, nums1, m: int, nums2, n: int):
        """Merge sorted nums2 (n items) into sorted nums1 (first m items).

        nums1 has capacity m + n; the merge runs in place from the back so
        no unread element of nums1 is overwritten.  Prints the result.
        """
        write = m + n - 1
        a, b = m - 1, n - 1
        while a >= 0 and b >= 0:
            if nums2[b] >= nums1[a]:
                nums1[write] = nums2[b]
                b -= 1
            else:
                nums1[write] = nums1[a]
                a -= 1
            write -= 1
        # Leftovers from nums2 go to the front; leftovers from nums1 are
        # already in place (empty slice when b < 0).
        nums1[:b + 1] = nums2[:b + 1]
        print(nums1)
#
# Time complexity : O(m+n)
# Space complexity : O(1)
# Example run from the problem statement.
nums1 = [1,2,3,0,0,0]
m = 3
nums2 = [2,5,6]
n = 3
s = Solution().merge(nums1, m, nums2, n)  # merge has no return statement, so s is None
|
class Solution:
    def merge(self, nums1, m: int, nums2, n: int):
        # Merge sorted nums2 (n items) into sorted nums1 (first m items),
        # in place, filling nums1 from the back so nothing unread is
        # overwritten.  Prints the merged list.
        i = m - 1
        j = n - 1
        k = m + n - 1
        while i >= 0 and j >= 0:
            if nums1[i] > nums2[j]:
                nums1[k] = nums1[i]
                i -= 1
            else:
                nums1[k] = nums2[j]
                j -= 1
            k -= 1
        # Any leftover nums2 elements belong at the front; leftover nums1
        # elements are already in position.
        if j >= 0:
            nums1[:k + 1] = nums2[:j + 1]
        print(nums1)
# Example run from the problem statement.
nums1 = [1, 2, 3, 0, 0, 0]
m = 3
nums2 = [2, 5, 6]
n = 3
# Bug fix: the class is named `Solution`; calling `solution()` raised NameError.
s = Solution().merge(nums1, m, nums2, n)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Advent of Code 2020, day five."""
# Path to the puzzle input: one boarding-pass code per line.
INPUT_FILE = 'data/day_05.txt'
def main() -> None:
    """Identify missing ticket."""
    with open(INPUT_FILE, encoding='utf-8') as input_file:
        # A boarding pass is binary: F/L are 0-bits, B/R are 1-bits, so the
        # whole code parses directly as a base-2 seat id.
        tkt = sorted([int(x.strip().replace('F', '0').replace('B', '1')
                      .replace('L', '0').replace('R', '1'), 2)
                      for x in input_file])
        print(f'Part One: Highest Seat Id: {tkt[-1]}')
        # Finding the missing ticket.
        # Using triangular numbers to get sum from 0 to last seat
        # Removing sum from 0 to seat before first one.
        sum_all = (tkt[-1] * (tkt[-1] + 1) - (tkt[0] - 1) * tkt[0]) // 2
        missing_ticket = sum_all - sum(tkt)
        print(f'Part Two: Missing Seat Id: {missing_ticket}')
main()
|
"""Advent of Code 2020, day five."""
input_file = 'data/day_05.txt'
def main() -> None:
"""Identify missing ticket."""
with open(INPUT_FILE, encoding='utf-8') as input_file:
tkt = sorted([int(x.strip().replace('F', '0').replace('B', '1').replace('L', '0').replace('R', '1'), 2) for x in input_file])
print(f'Part One: Highest Seat Id: {tkt[-1]}')
sum_all = (tkt[-1] * (tkt[-1] + 1) - (tkt[0] - 1) * tkt[0]) // 2
missing_ticket = sum_all - sum(tkt)
print(f'Part Two: Missing Seat Id: {missing_ticket}')
main()
|
"""Python3 Code to solve problem 1253: Reconstruct a 2-Row Binary Matrix. """
class Solution(object):
    def reconstructMatrix(self, upper: int, lower: int, colsum: list) -> list:
        """Build a 2-row 0/1 matrix with row sums upper/lower and the given
        column sums.  Returns [] when no such matrix exists."""
        width = len(colsum)
        # Columns with sum 2 force a 1 in both rows; sum-0 columns force
        # 0 in both.  Only sum-1 columns leave a choice.
        twos = {idx for idx, total in enumerate(colsum) if total == 2}
        zeros = {idx for idx, total in enumerate(colsum) if total == 0}
        ones = width - len(twos) - len(zeros)
        ones_upper = upper - len(twos)
        ones_lower = lower - len(twos)
        # Feasibility: the sum-1 columns must exactly absorb what is left
        # of each row's budget after the forced sum-2 columns.
        if ones_upper < 0 or ones_lower < 0 or ones_upper + ones_lower != ones:
            return []
        top = [0] * width
        bottom = [0] * width
        placed_upper = 0
        for idx in range(width):
            if idx in zeros:
                continue
            if idx in twos:
                top[idx] = 1
                bottom[idx] = 1
            elif placed_upper < ones_upper:
                # Greedily satisfy the upper row first, then the lower.
                top[idx] = 1
                placed_upper += 1
            else:
                bottom[idx] = 1
        return [top, bottom]
|
"""Python3 Code to solve problem 1253: Reconstruct a 2-Row Binary Matrix. """
class Solution(object):
def reconstruct_matrix(self, upper: int, lower: int, colsum: list) -> list:
zero_col = set()
two_col = set()
col_num = len(colsum)
for (col_id, col_sum) in enumerate(colsum):
if col_sum == 0:
zero_col.add(col_id)
elif col_sum == 2:
two_col.add(col_id)
one_col_num = col_num - len(zero_col) - len(two_col)
one_col_upper_num = upper - len(two_col)
one_col_lower_num = lower - len(two_col)
if one_col_upper_num < 0 or one_col_lower_num < 0 or one_col_upper_num + one_col_lower_num != one_col_num:
return []
result = [[0] * col_num for _ in range(2)]
one_added_upper_num = 0
for i in range(col_num):
if i in zero_col:
continue
elif i in two_col:
result[0][i] = 1
result[1][i] = 1
elif one_added_upper_num < one_col_upper_num:
result[0][i] = 1
one_added_upper_num += 1
else:
result[1][i] = 1
return result
|
"""
You're given a substring s of some cyclic string.
What's the length of the smallest possible string that can be concatenated to itself many times to obtain this cyclic string?
Example
For s = "cabca", the output should be
cyclicString(s) = 3.
"cabca" is a substring of a cycle string "abcabcabcabc..." that can be obtained by concatenating "abc" to itself.
Thus, the answer is 3.
"""
def cyclicString(s1):
    """Return the length of the shortest period of s1.

    The result is the smallest p such that s1[i] == s1[i - p] for every
    i >= p, i.e. s1 is a substring of a cycle of some length-p string.
    """
    n = len(s1)
    for period in range(1, n):
        # s1 has this period iff every character matches the character
        # `period` positions earlier.
        if all(s1[i] == s1[i - period] for i in range(period, n)):
            return period
    # No shorter period exists: the whole string is the cycle.
    return n
|
"""
You're given a substring s of some cyclic string.
What's the length of the smallest possible string that can be concatenated to itself many times to obtain this cyclic string?
Example
For s = "cabca", the output should be
cyclicString(s) = 3.
"cabca" is a substring of a cycle string "abcabcabcabc..." that can be obtained by concatenating "abc" to itself.
Thus, the answer is 3.
"""
def cyclic_string(s1):
    # Return the smallest period p of s1: every character equals the one
    # p positions earlier.  Falls back to len(s1) when no shorter period
    # exists.
    for answer in range(1, len(s1)):
        correct = True
        for position in range(answer, len(s1)):
            if s1[position] != s1[position - answer]:
                correct = False
        if correct:
            return answer
    # No proper period found; the whole string is the cycle.
    return len(s1)
|
# Example of mutual recursion with even/odd.
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
def is_even(n):
    # Mutually recursive with is_odd; recursion depth is n, so large n can
    # exceed the interpreter's recursion limit (hence the thunked variants
    # below).
    if n == 0:
        return True
    else:
        return is_odd(n - 1)
def is_odd(n):
    if n == 0:
        return False
    else:
        return is_even(n - 1)
def is_even_thunked(n):
    # Instead of recursing directly, return a zero-argument thunk; the
    # trampoline keeps invoking thunks, so the stack depth stays O(1).
    if n == 0:
        return True
    else:
        return lambda: is_odd_thunked(n - 1)
def is_odd_thunked(n):
    if n == 0:
        return False
    else:
        return lambda: is_even_thunked(n - 1)
def trampoline(f, *args):
    """Drive a thunk-returning function to completion.

    Calls f(*args); while the result is callable it is treated as a
    zero-argument thunk and invoked again.  Returns the first
    non-callable result.
    """
    result = f(*args)
    while callable(result):
        result = result()
    return result
if __name__ == '__main__':
    # Depth 800 is safe for direct mutual recursion.
    print(is_even(800))
    # is_even(1000) would blow past the default recursion limit, but
    # trampolining keeps the stack depth constant and small.
    print(trampoline(is_even_thunked, 1000))
|
def is_even(n):
    # Mutually recursive with is_odd; recursion depth is n.
    if n == 0:
        return True
    else:
        return is_odd(n - 1)
def is_odd(n):
    if n == 0:
        return False
    else:
        return is_even(n - 1)
def is_even_thunked(n):
    # Returns a zero-argument thunk instead of recursing, so trampoline()
    # can run the computation with O(1) stack depth.
    if n == 0:
        return True
    else:
        return lambda : is_odd_thunked(n - 1)
def is_odd_thunked(n):
    if n == 0:
        return False
    else:
        return lambda : is_even_thunked(n - 1)
def trampoline(f, *args):
    # Repeatedly invoke callable results until a non-callable value emerges.
    v = f(*args)
    while callable(v):
        v = v()
    return v
if __name__ == '__main__':
    print(is_even(800))
    # Trampolining keeps stack depth constant, so depth 1000 is safe even
    # though direct recursion that deep could hit the recursion limit.
    print(trampoline(is_even_thunked, 1000))
|
X = {}
print(5 in X)
# NOTE(review): X is empty, so the lookup below raises KeyError on the
# first subscript; presumably this snippet demonstrates that behaviour.
print(X[4])
print(X[5])
|
# Bug fix: the dict was renamed to lowercase `x` but the lookups still
# referenced `X`, raising NameError before the lookups even ran.
x = {}
print(5 in x)  # False: the dict is empty
print(x[4])    # raises KeyError, as in the original snippet
print(x[5])
|
# -*- coding: utf-8 -*-
# Author: Tonio Teran <tonio@stateoftheart.ai>
# Copyright: Stateoftheart AI PBC 2021.
'''NEAR AI's library wrapper.
Dataset information taken from:
'''
# Static metadata describing the wrapped source library.
SOURCE_METADATA = {
    'name': 'nearai',
    'original_name': 'NEAR Program Synthesis',
    'url': 'https://github.com/nearai/program_synthesis'
}
# Task name -> datasets available from this source.
DATASETS = {'Program Synthesis': ['AlgoLisp', 'Karel', 'NAPS']}
def load_dataset(name: str) -> dict:
    """Return a minimal record tagging *name* as a nearai dataset."""
    record = {'name': name}
    record['source'] = 'nearai'
    return record
|
"""NEAR AI's library wrapper.
Dataset information taken from:
"""
source_metadata = {'name': 'nearai', 'original_name': 'NEAR Program Synthesis', 'url': 'https://github.com/nearai/program_synthesis'}
datasets = {'Program Synthesis': ['AlgoLisp', 'Karel', 'NAPS']}
def load_dataset(name: str) -> dict:
return {'name': name, 'source': 'nearai'}
|
#!/usr/bin/python
# Echo one line of user input, then print "hi" three times.
inp = input(">> ")
print(inp)
count = 0
while True:
    print("hi")
    count += 1
    # Stop after the third iteration.
    if count == 3:
        break
|
# Echo one line of user input, then print "hi" three times.
inp = input('>> ')
print(inp)
count = 0
while True:
    print('hi')
    count += 1
    # Stop after the third iteration.
    if count == 3:
        break
|
class Solution:
    def longestCommonPrefix(self, strs):
        """
        :type strs: List[str]
        :rtype: str
        """
        if not strs:
            return ''
        prefix_len = 0
        # zip(*strs) yields one tuple per character column and stops at
        # the shortest string.
        for column in zip(*strs):
            if len(set(column)) != 1:
                break
            prefix_len += 1
        else:
            # Every compared column matched: the shortest string is the prefix.
            return min(strs)
        return strs[0][:prefix_len]
|
class Solution:
    def longest_common_prefix(self, strs):
        """
        :type strs: List[str]
        :rtype: str
        """
        if not strs:
            return ''
        # zip(*strs) yields one tuple per character column, stopping at the
        # shortest string.
        zip_strs = zip(*strs)
        for (i, letter_group) in enumerate(zip_strs):
            if len(set(letter_group)) > 1:
                # First column with differing characters ends the prefix.
                return strs[0][:i]
        # All compared columns matched; the shortest string is the prefix.
        return min(strs)
|
#VERSION: 1.0
INFO = {"example":("test","This is an example mod")}
RLTS = {"cls":(),"funcs":("echo"),"vars":()}
def test(cmd):
echo(0,cmd)
|
info = {'example': ('test', 'This is an example mod')}
rlts = {'cls': (), 'funcs': 'echo', 'vars': ()}
def test(cmd):
echo(0, cmd)
|
queries = [
"""SELECT * WHERE { ?s ?p ?o }""",
"""SELECT ?point ?point_type WHERE {
?point rdf:type brick:Point .
?point rdf:type ?point_type
}""",
"SELECT ?meter WHERE { ?meter rdf:type brick:Green_Button_Meter }",
""" SELECT ?t WHERE { ?t rdf:type brick:Weather_Temperature_Sensor }""",
"""SELECT ?sensor WHERE {
?sensor rdf:type brick:Zone_Air_Temperature_Sensor .
?sensor brick:isPointOf ?equip
}""",
"""SELECT ?sp WHERE {
?sp rdf:type brick:Zone_Air_Temperature_Setpoint .
?sp brick:isPointOf ?equip
}""",
"SELECT ?meter WHERE { ?meter rdf:type brick:Building_Electric_Meter }",
"SELECT ?point WHERE { ?point rdf:type brick:Occupancy_Sensor }",
"""SELECT ?tstat ?room ?zone ?state ?temp ?hsp ?csp WHERE {
?tstat brick:hasLocation ?room .
?zone brick:hasPart ?room .
?tstat brick:hasPoint ?state .
?tstat brick:hasPoint ?temp .
?tstat brick:hasPoint ?hsp .
?tstat brick:hasPoint ?csp .
?zone rdf:type brick:Zone .
?tstat rdf:type brick:Thermostat .
?state rdf:type brick:Thermostat_Status .
?temp rdf:type brick:Temperature_Sensor .
?hsp rdf:type brick:Supply_Air_Temperature_Heating_Setpoint .
?csp rdf:type brick:Supply_Air_Temperature_Cooling_Setpoint
}
""",
"""SELECT ?sensor ?sp ?equip WHERE {
?sensor rdf:type brick:Air_Flow_Sensor .
?sp rdf:type brick:Air_Flow_Setpoint .
?sensor brick:isPointOf ?equip .
?sp brick:isPointOf ?equip
}""",
"""SELECT ?cooling_point ?heating_point ?ahu WHERE {
?cooling_point rdf:type brick:Cooling_Valve_Command .
?heating_point rdf:type brick:Heating_Valve_Command .
?ahu brick:hasPoint ?cooling_point .
?ahu brick:hasPoint ?heating_point
}""",
"""SELECT * WHERE {
?equip rdf:type brick:VAV .
?equip brick:isFedBy ?ahu .
?ahu brick:hasPoint ?upstream_ta .
?equip brick:hasPoint ?dnstream_ta .
?upstream_ta rdf:type brick:Supply_Air_Temperature_Sensor .
?dnstream_ta rdf:type brick:Supply_Air_Temperature_Sensor .
?equip brick:hasPoint ?vlv .
?vlv rdf:type brick:Valve_Command
}""",
"""SELECT * WHERE {
?equip rdf:type brick:VAV .
?equip brick:hasPoint ?air_flow .
?air_flow rdf:type brick:Supply_Air_Flow_Sensor
}""",
"""SELECT * WHERE {
?vlv rdf:type brick:Valve_Command .
?vlv rdf:type ?vlv_type .
?equip brick:hasPoint ?vlv .
?equip rdf:type brick:Air_Handling_Unit .
?air_temps rdf:type brick:Supply_Air_Temperature_Sensor .
?equip brick:hasPoint ?air_temps .
?air_temps rdf:type ?temp_type
}""",
"""SELECT * WHERE {
?vlv rdf:type brick:Valve_Command .
?vlv rdf:type ?vlv_type .
?equip brick:hasPoint ?vlv .
?equip rdf:type brick:Air_Handling_Unit .
?air_temps rdf:type brick:Return_Air_Temperature_Sensor .
?equip brick:hasPoint ?air_temps .
?air_temps rdf:type ?temp_type
}""",
"""SELECT ?vav WHERE {
?vav rdf:type brick:VAV
}""",
#"""SELECT DISTINCT ?sensor ?room
#WHERE {
#
# ?sensor rdf:type brick:Zone_Temperature_Sensor .
# ?room rdf:type brick:Room .
# ?vav rdf:type brick:VAV .
# ?zone rdf:type brick:HVAC_Zone .
#
# ?vav brick:feeds+ ?zone .
# ?zone brick:hasPart ?room .
#
# {?sensor brick:isPointOf ?vav }
# UNION
# {?sensor brick:isPointOf ?room }
#}""",
"""SELECT ?sensor ?room WHERE {
?sensor rdf:type brick:Zone_Temperature_Sensor .
?room rdf:type brick:Room .
?vav rdf:type brick:VAV .
?zone rdf:type brick:HVAC_Zone .
?vav brick:feeds+ ?zone .
?zone brick:hasPart ?room .
?vav brick:hasPoint ?sensor
}""",
# """SELECT ?vlv_cmd ?vav
# WHERE {
# { ?vlv_cmd rdf:type brick:Reheat_Valve_Command }
# UNION
# { ?vlv_cmd rdf:type brick:Cooling_Valve_Command }
# ?vav rdf:type brick:VAV .
# ?vav brick:hasPoint+ ?vlv_cmd .
# }""",
"""SELECT ?floor ?room ?zone WHERE {
?floor rdf:type brick:Floor .
?room rdf:type brick:Room .
?zone rdf:type brick:HVAC_Zone .
?room brick:isPartOf+ ?floor .
?room brick:isPartOf+ ?zone
}""",
]
|
queries = ['SELECT * WHERE { ?s ?p ?o }', 'SELECT ?point ?point_type WHERE {\n ?point rdf:type brick:Point .\n ?point rdf:type ?point_type \n}', 'SELECT ?meter WHERE { ?meter rdf:type brick:Green_Button_Meter }', ' SELECT ?t WHERE { ?t rdf:type brick:Weather_Temperature_Sensor }', 'SELECT ?sensor WHERE {\n ?sensor rdf:type brick:Zone_Air_Temperature_Sensor .\n ?sensor brick:isPointOf ?equip \n}', 'SELECT ?sp WHERE {\n ?sp rdf:type brick:Zone_Air_Temperature_Setpoint .\n ?sp brick:isPointOf ?equip\n}', 'SELECT ?meter WHERE { ?meter rdf:type brick:Building_Electric_Meter }', 'SELECT ?point WHERE { ?point rdf:type brick:Occupancy_Sensor }', 'SELECT ?tstat ?room ?zone ?state ?temp ?hsp ?csp WHERE {\n ?tstat brick:hasLocation ?room .\n ?zone brick:hasPart ?room .\n ?tstat brick:hasPoint ?state .\n ?tstat brick:hasPoint ?temp .\n ?tstat brick:hasPoint ?hsp .\n ?tstat brick:hasPoint ?csp .\n ?zone rdf:type brick:Zone .\n ?tstat rdf:type brick:Thermostat .\n ?state rdf:type brick:Thermostat_Status .\n ?temp rdf:type brick:Temperature_Sensor .\n ?hsp rdf:type brick:Supply_Air_Temperature_Heating_Setpoint .\n ?csp rdf:type brick:Supply_Air_Temperature_Cooling_Setpoint\n }\n ', 'SELECT ?sensor ?sp ?equip WHERE {\n ?sensor rdf:type brick:Air_Flow_Sensor .\n ?sp rdf:type brick:Air_Flow_Setpoint .\n ?sensor brick:isPointOf ?equip .\n ?sp brick:isPointOf ?equip\n}', 'SELECT ?cooling_point ?heating_point ?ahu WHERE {\n ?cooling_point rdf:type brick:Cooling_Valve_Command .\n ?heating_point rdf:type brick:Heating_Valve_Command .\n ?ahu brick:hasPoint ?cooling_point .\n ?ahu brick:hasPoint ?heating_point\n }', 'SELECT * WHERE {\n ?equip rdf:type brick:VAV .\n ?equip brick:isFedBy ?ahu .\n ?ahu brick:hasPoint ?upstream_ta .\n ?equip brick:hasPoint ?dnstream_ta .\n ?upstream_ta rdf:type brick:Supply_Air_Temperature_Sensor .\n ?dnstream_ta rdf:type brick:Supply_Air_Temperature_Sensor .\n ?equip brick:hasPoint ?vlv .\n ?vlv rdf:type brick:Valve_Command\n}', 'SELECT * WHERE {\n ?equip 
rdf:type brick:VAV .\n ?equip brick:hasPoint ?air_flow .\n ?air_flow rdf:type brick:Supply_Air_Flow_Sensor\n }', 'SELECT * WHERE {\n ?vlv rdf:type brick:Valve_Command .\n ?vlv rdf:type ?vlv_type .\n ?equip brick:hasPoint ?vlv .\n ?equip rdf:type brick:Air_Handling_Unit .\n ?air_temps rdf:type brick:Supply_Air_Temperature_Sensor .\n ?equip brick:hasPoint ?air_temps .\n ?air_temps rdf:type ?temp_type\n }', 'SELECT * WHERE {\n ?vlv rdf:type brick:Valve_Command .\n ?vlv rdf:type ?vlv_type .\n ?equip brick:hasPoint ?vlv .\n ?equip rdf:type brick:Air_Handling_Unit .\n ?air_temps rdf:type brick:Return_Air_Temperature_Sensor .\n ?equip brick:hasPoint ?air_temps .\n ?air_temps rdf:type ?temp_type\n }', 'SELECT ?vav WHERE {\n ?vav rdf:type brick:VAV\n}', 'SELECT ?sensor ?room WHERE {\n ?sensor rdf:type brick:Zone_Temperature_Sensor .\n ?room rdf:type brick:Room .\n ?vav rdf:type brick:VAV .\n ?zone rdf:type brick:HVAC_Zone .\n ?vav brick:feeds+ ?zone .\n ?zone brick:hasPart ?room .\n ?vav brick:hasPoint ?sensor\n}', 'SELECT ?floor ?room ?zone WHERE {\n ?floor rdf:type brick:Floor .\n ?room rdf:type brick:Room .\n ?zone rdf:type brick:HVAC_Zone .\n\n ?room brick:isPartOf+ ?floor .\n ?room brick:isPartOf+ ?zone\n}']
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode
        """
        def _length(node):
            # Walk to the tail, counting nodes.
            count = 0
            while node:
                node = node.next
                count += 1
            return count
        len_a = _length(headA)
        len_b = _length(headB)
        # Advance the head of the longer list so both cursors are the
        # same distance from the tail.
        if len_a > len_b:
            for _ in range(len_a - len_b):
                headA = headA.next
        else:
            for _ in range(len_b - len_a):
                headB = headB.next
        # March in lockstep; the cursors meet at the intersection node,
        # or both reach None when the lists never intersect.
        while headA != headB:
            headA = headA.next
            headB = headB.next
        return headA
|
class Solution(object):
    def get_intersection_node(self, headA, headB):
        """Return the first node shared by both singly linked lists, or None.

        :type headA, headB: ListNode
        :rtype: ListNode

        Bug fix: the previous body assigned `head_a = headA.next` inside
        its loops, never advancing the loop variables (infinite loops),
        and referenced undefined names such as `headA_start` and `len_A`.
        """
        def _length(node):
            # Walk to the tail, counting nodes.
            count = 0
            while node:
                node = node.next
                count += 1
            return count
        len_a = _length(headA)
        len_b = _length(headB)
        # Skip the extra leading nodes of the longer list so both cursors
        # are equally far from the tail.
        if len_a > len_b:
            for _ in range(len_a - len_b):
                headA = headA.next
        else:
            for _ in range(len_b - len_a):
                headB = headB.next
        # Advance in lockstep until the cursors coincide (intersection
        # node) or both fall off the end (None).
        while headA is not headB:
            headA = headA.next
            headB = headB.next
        return headA
|
# Keeps prompting for a name; exits only when the user types "sair".
nome = str(input("Escreva nome :(sair para terminar)"))
while nome != "sair":
    nome = str(input("Escreva nome: (sair para terminar)"))
|
# Keep prompting for a name until the user types "sair" ("exit" in Portuguese).
nome = str(input('Escreva nome :(sair para terminar)'))
while nome != 'sair':
    nome = str(input('Escreva nome: (sair para terminar)'))
|
def heap_sort(l: list):
    """Sort l in place, ascending, using heap sort.

    Fixes over the previous version: an empty list no longer raises
    IndexError (the old code unconditionally touched l[0]), and the
    recursive sift/flag scheme is replaced by an iterative sift-down,
    so large inputs cannot hit the interpreter recursion limit.
    """
    n = len(l)
    def _sift_down(root, end):
        # Restore the max-heap property for the subtree rooted at `root`,
        # considering only indices < end.
        while True:
            child = 2 * root + 1
            if child >= end:
                break
            # Pick the larger of the two children.
            if child + 1 < end and l[child + 1] > l[child]:
                child += 1
            if l[root] >= l[child]:
                break
            l[root], l[child] = l[child], l[root]
            root = child
    # Heapify: sift down every internal node, bottom-up.
    for i in range(n // 2 - 1, -1, -1):
        _sift_down(i, n)
    # Repeatedly move the max to the sorted tail and re-sift the root.
    for end in range(n - 1, 0, -1):
        l[0], l[end] = l[end], l[0]
        _sift_down(0, end)
if __name__ == "__main__":
l = [6, 1, 17, 4, 20, 15, 33, 10, 194, 54, 99, 1004, 5, 477]
heap_sort(l)
print(l)
|
def heap_sort(l: list):
    """Sort *l* in place in ascending order using heapsort.

    Builds a max-heap, then repeatedly swaps the root (current maximum) with
    the last unsorted element and re-heapifies the shrunken prefix.
    O(n log n) time, O(1) extra space.

    Fixes over the original recursive version: no IndexError on an empty
    list (the old ``node < 0`` branch touched ``l[0]``/``l[-1]``
    unconditionally), and iteration removes the linear recursion depth.

    :param l: list of mutually comparable items; modified in place.
    :returns: None (in-place mutator, matching the original contract).
    """
    def _sift_down(root: int, size: int):
        # Restore the max-heap property for the subtree rooted at *root*,
        # considering only the first *size* elements.
        while True:
            top = root
            left, right = 2 * root + 1, 2 * root + 2
            if left < size and l[top] < l[left]:
                top = left
            if right < size and l[top] < l[right]:
                top = right
            if top == root:
                return
            l[root], l[top] = l[top], l[root]
            root = top

    count = len(l)
    # Build the heap from the last non-leaf node up to the root.
    for node in range(count // 2 - 1, -1, -1):
        _sift_down(node, count)
    # Repeatedly move the maximum to the end and fix the prefix.
    for end in range(count - 1, 0, -1):
        l[0], l[end] = l[end], l[0]
        _sift_down(0, end)
if __name__ == '__main__':
    # Demonstrate the sort on a small unsorted sample.
    numbers = [6, 1, 17, 4, 20, 15, 33, 10, 194, 54, 99, 1004, 5, 477]
    heap_sort(numbers)
    print(numbers)
|
# Directories in which duplicate files are searched for; all live under the
# same Magento media root, so the common prefix is factored out.
_MEDIA_ROOT = r"C:\Users\Gamer\Documents\Projekte\azzap\azzap-docker-dev\data\html\pub\media"
dir_list = [
    _MEDIA_ROOT + r"\catalog\product",
    _MEDIA_ROOT + r"\import\multishopifystoremageconnect",
    _MEDIA_ROOT + r"\import\mpmultishopifystoremageconnect",
]
|
# Directories in which duplicate files are searched for (raw strings avoid
# doubling every backslash in the Windows paths).
dir_list = [
    r'C:\Users\Gamer\Documents\Projekte\azzap\azzap-docker-dev\data\html\pub\media\catalog\product',
    r'C:\Users\Gamer\Documents\Projekte\azzap\azzap-docker-dev\data\html\pub\media\import\multishopifystoremageconnect',
    r'C:\Users\Gamer\Documents\Projekte\azzap\azzap-docker-dev\data\html\pub\media\import\mpmultishopifystoremageconnect',
]
|
""" User assistance/help texts """
AGGREGATE_DATA = """
Here you can see an overview and aggregated data over all batches. Please note that only \
the data from instances that are part of experimental batches is considered here, not data from instances that \
have been started in between batches.
"""
CUSTOMER_CATEGORIES_INPUT = "e.g. public-gov"
DEFAULT_VERSION_INPUT = "The version that is used in between batches"
MIN_DURATION_INPUT = """
Please enter the minimum and maximum duration of the past process data (old version) here. This data \
is used to calculate the reward of new instances more reliably.
"""
DETAILED_DATA = """
Here you can see the details of each and every instance that has been part of an experimental batch.
"""
BATCH_NUMBER_CHOICE = "Number 1 means the first batch set for a process, number 2 means the second, and so on"
BATCH_SIZE_HELP = """
Here you can choose how many of the next incoming process instantiation requests will be part of this experimental \
batch.
"""
HISTORY_UPLOAD_DEFAULT = """
Should be a .json file with this content format:
{
"interarrivalTime": 0.98,
"durations": [
0.198,
0.041,
0.124,
0.04,
0.099,
0.144
]
}
"""
CONTROLS_HELP = "This is where the human process expert can control the experiment."
DEV_MODE_HELP = """
If you are just using the app for development purposes or to try it out \
you can simulate process instantiation requests instead of having real requests from customers/clients. \
An additional client simulator area will pop up in the dashboard if you activate dev mode.
"""
EXPERIMENT_METADATA = "Here, you can find useful metadata about the running experiment."
COOL_OFF_DETAILED = """
*Q: What is Cool-Off/the Cool-Off period?* \n
A: When you want to conclude the experiment, there might still be some long running, \
unevaluated process instances. Since it is important to take them into account for your \
final decision, we have implemented the cool off period. This makes sure, that all the \
instances that were part of any experimental batch are finished and have been evaluated and taken \
into account before we present the final proposal and you make the final decision.
"""
EXPERIMENTAL_INSTANCE = """
'Experimental instance' refers to an instance that has been started as part of a batch. It is called like that,
since only the instances that have been started as part of a batch are evaluated and part of the experiment.
"""
MANUAL_TRIGGER_FETCH_LEARN = """
Trigger polling of process engine
for instance data and learning with that data
(this will also happen automatically periodically, at about every n-th incoming instantiation request - with n being
half of the average batch size)
"""
SUBMIT_CHOICE_BUTTON = """
After submitting the choice/final decision, all incoming instantiation requests for the process will be
routed in accordance with this decision
"""
|
""" User assistance/help texts """
aggregate_data = '\nHere you can see an overview and aggregated data over all batches. Please note that only the data from instances that are part of experimental batches is considered here, not data from instances that have been started in between batches.\n'
customer_categories_input = 'e.g. public-gov'
default_version_input = 'The version that is used in between batches'
min_duration_input = '\nPlease enter the minimum and maximum duration of the past process data (old version) here. This data is used to calculate the reward of new instances more reliably.\n'
detailed_data = '\nHere you can see the details of each and every instance that has been part of an experimental batch.\n'
batch_number_choice = 'Number 1 means the first batch set for a process, number 2 means the second, and so on'
batch_size_help = '\nHere you can choose how many of the next incoming process instantiation requests will be part of this experimental batch.\n'
history_upload_default = '\nShould be a .json file with this content format:\n{\n "interarrivalTime": 0.98,\n "durations": [\n 0.198,\n 0.041,\n 0.124,\n 0.04,\n 0.099,\n 0.144\n ]\n}\n'
controls_help = 'This is where the human process expert can control the experiment.'
dev_mode_help = '\nIf you are just using the app for development purposes or to try it out you can simulate process instantiation requests instead of having real requests from customers/clients. An additional client simulator area will pop up in the dashboard if you activate dev mode.\n'
experiment_metadata = 'Here, you can find useful metadata about the running experiment.'
cool_off_detailed = '\n*Q: What is Cool-Off/the Cool-Off period?* \n\nA: When you want to conclude the experiment, there might still be some long running, unevaluated process instances. Since it is important to take them into account for your final decision, we have implemented the cool off period. This makes sure, that all the instances that were part of any experimental batch are finished and have been evaluated and taken into account before we present the final proposal and you make the final decision.\n'
experimental_instance = "\n'Experimental instance' refers to an instance that has been started as part of a batch. It is called like that, \nsince only the instances that have been started as part of a batch are evaluated and part of the experiment.\n"
manual_trigger_fetch_learn = '\nTrigger polling of process engine\n for instance data and learning with that data\n (this will also happen automatically periodically, at about every n-th incoming instantiation request - with n being\n half of the average batch size)\n'
submit_choice_button = '\nAfter submitting the choice/final decision, all incoming instantiation requests for the process will be \nrouted in accordance with this decision\n'
|
# NFC final standings, printed line by line (the "\n" on the St. Louis row
# yields the blank line separating the two divisions).
_nfc_standings = (
    "NFC West W L T",
    "-----------------------",
    "Seattle 13 3 0",
    "San Francisco 12 4 0",
    "Arizona 10 6 0",
    "St. Louis 7 9 0\n",
    "NFC North W L T",
    "-----------------------",
    "Green Bay 8 7 1",
    "Chicago 8 8 0",
    "Detroit 7 9 0",
    "Minnesota 5 10 1",
)
for _row in _nfc_standings:
    print(_row)
|
# NFC final standings, emitted as one write instead of twelve print calls;
# '\n'.join plus print's trailing newline reproduces the original output
# exactly (the '\n' on the St. Louis row yields the blank separator line).
print('\n'.join([
    'NFC West W L T',
    '-----------------------',
    'Seattle 13 3 0',
    'San Francisco 12 4 0',
    'Arizona 10 6 0',
    'St. Louis 7 9 0\n',
    'NFC North W L T',
    '-----------------------',
    'Green Bay 8 7 1',
    'Chicago 8 8 0',
    'Detroit 7 9 0',
    'Minnesota 5 10 1',
]))
|
# Colour pallet: each entry pairs an (R, G, B) tuple with its catalog name.
# Keeping colour and name together in one table removes the risk of the two
# parallel lists drifting out of sync; the public lists are derived below.
_PALLET = [
    ((231, 234, 238), "010 White"),
    ((252, 166, 0), "020Golden yellow"),
    ((232, 167, 0), "019 Signal yellow"),
    ((254, 198, 0), "021 Yellow"),
    ((242, 203, 0), "022 Light yellow"),
    ((241, 225, 14), "025 Brimstone yellow"),
    ((116, 2, 16), "312 Burgundy"),
    ((145, 8, 20), "030 Dark red"),
    ((175, 0, 11), "031 Red"),
    ((199, 12, 0), "032 Light red"),
    ((211, 48, 0), "047 Orange red"),
    ((221, 68, 0), "034 Orange"),
    ((236, 102, 0), "036 Light orange"),
    ((255, 109, 0), "035 Pastel orange"),
    ((65, 40, 114), "404 Purple"),
    ((93, 43, 104), "040 Violet"),
    ((120, 95, 162), "043 Lavender"),
    ((186, 148, 188), "042 Lilac"),
    ((195, 40, 106), "041 Pink"),
    ((239, 135, 184), "045 Soft pink"),
    ((19, 29, 57), "562 Deep sea blue"),
    ((15, 17, 58), "518 Steel blue"),
    ((28, 47, 94), "050 Dark blue"),
    ((13, 31, 106), "065 Cobalt blue"),
    ((23, 43, 121), "049 King blue"),
    ((27, 47, 170), "086 Brilliant blue"),
    ((0, 58, 120), "057 Blue"),
    ((0, 65, 142), "057 Traffic blue"),
    ((0, 69, 131), "051 Gentian blue"),
    ((0, 79, 159), "098 Gentian"),
    ((0, 94, 173), "052 Azure blue"),
    ((0, 116, 187), "084 Sky blue"),
    ((0, 136, 195), "053 Light blue"),
    ((67, 162, 211), "056 Ice blue"),
    ((0, 131, 142), "066 Turquoise blue"),
    ((0, 155, 151), "054 Turquoise"),
    ((95, 206, 183), "055 Mint"),
    ((0, 60, 36), "060 Dark green"),
    ((0, 82, 54), "613 Forest green"),
    ((0, 122, 77), "061 Green"),
    ((0, 120, 63), "068 Grass green"),
    ((0, 137, 58), "062 Light green"),
    ((35, 155, 17), "064 Yellow green"),
    ((106, 167, 47), "063 Lime-tree green"),
    ((85, 51, 28), "080 Brown"),
    ((175, 89, 30), "083 Nut brown"),
    ((168, 135, 90), "081 Light brown"),
    ((205, 192, 158), "082 Beige"),
    ((231, 210, 147), "023 Cream"),
    ((6, 6, 7), "070 Black"),
    ((75, 76, 76), "073 Dark grey"),
    ((117, 125, 124), "071 Grey"),
    ((128, 133, 136), "076 Telegrey"),
    ((138, 143, 140), "074 Middle grey"),
    ((192, 195, 195), "072 Light grey"),
    ((111, 114, 116), "090 Silver grey"),
    ((121, 101, 50), "091 Gold"),
    ((105, 64, 30), "092 Copper"),
]
# Derived parallel lists keep the original public interface intact
# ("020Golden yellow" reproduces the upstream catalog's spelling).
pallet_color = [rgb for rgb, _ in _PALLET]
pallet_color_name = [name for _, name in _PALLET]
|
# Colour pallet expressed as (rgb, name) pairs so colour and catalog name can
# never drift apart; the two public lists are derived from the single table.
_pallet_entries = [
    ((231, 234, 238), '010 White'),
    ((252, 166, 0), '020Golden yellow'),
    ((232, 167, 0), '019 Signal yellow'),
    ((254, 198, 0), '021 Yellow'),
    ((242, 203, 0), '022 Light yellow'),
    ((241, 225, 14), '025 Brimstone yellow'),
    ((116, 2, 16), '312 Burgundy'),
    ((145, 8, 20), '030 Dark red'),
    ((175, 0, 11), '031 Red'),
    ((199, 12, 0), '032 Light red'),
    ((211, 48, 0), '047 Orange red'),
    ((221, 68, 0), '034 Orange'),
    ((236, 102, 0), '036 Light orange'),
    ((255, 109, 0), '035 Pastel orange'),
    ((65, 40, 114), '404 Purple'),
    ((93, 43, 104), '040 Violet'),
    ((120, 95, 162), '043 Lavender'),
    ((186, 148, 188), '042 Lilac'),
    ((195, 40, 106), '041 Pink'),
    ((239, 135, 184), '045 Soft pink'),
    ((19, 29, 57), '562 Deep sea blue'),
    ((15, 17, 58), '518 Steel blue'),
    ((28, 47, 94), '050 Dark blue'),
    ((13, 31, 106), '065 Cobalt blue'),
    ((23, 43, 121), '049 King blue'),
    ((27, 47, 170), '086 Brilliant blue'),
    ((0, 58, 120), '057 Blue'),
    ((0, 65, 142), '057 Traffic blue'),
    ((0, 69, 131), '051 Gentian blue'),
    ((0, 79, 159), '098 Gentian'),
    ((0, 94, 173), '052 Azure blue'),
    ((0, 116, 187), '084 Sky blue'),
    ((0, 136, 195), '053 Light blue'),
    ((67, 162, 211), '056 Ice blue'),
    ((0, 131, 142), '066 Turquoise blue'),
    ((0, 155, 151), '054 Turquoise'),
    ((95, 206, 183), '055 Mint'),
    ((0, 60, 36), '060 Dark green'),
    ((0, 82, 54), '613 Forest green'),
    ((0, 122, 77), '061 Green'),
    ((0, 120, 63), '068 Grass green'),
    ((0, 137, 58), '062 Light green'),
    ((35, 155, 17), '064 Yellow green'),
    ((106, 167, 47), '063 Lime-tree green'),
    ((85, 51, 28), '080 Brown'),
    ((175, 89, 30), '083 Nut brown'),
    ((168, 135, 90), '081 Light brown'),
    ((205, 192, 158), '082 Beige'),
    ((231, 210, 147), '023 Cream'),
    ((6, 6, 7), '070 Black'),
    ((75, 76, 76), '073 Dark grey'),
    ((117, 125, 124), '071 Grey'),
    ((128, 133, 136), '076 Telegrey'),
    ((138, 143, 140), '074 Middle grey'),
    ((192, 195, 195), '072 Light grey'),
    ((111, 114, 116), '090 Silver grey'),
    ((121, 101, 50), '091 Gold'),
    ((105, 64, 30), '092 Copper'),
]
pallet_color = [rgb for rgb, _ in _pallet_entries]
pallet_color_name = [name for _, name in _pallet_entries]
|
# All metrics are treated as gauge as the counter instrument don't provide a set
# method and for performance, it's always good to avoid calculation when ever
# it's possible.  Using the inc() method require the diff to be calculated!
#
# `metrics` is a list of (metric_name, description) tuples.  For readability it
# is assembled from two groups: metrics with a hand-written description, and
# metrics whose "description" is simply the metric name repeated.  The
# resulting list content and ordering are unchanged.
_DESCRIBED_METRICS = [
    # Custom metrics
    ("up", "the status of the node (1=running, 0=down, -1=connection issue, -2=node error)"),
    ("peers_count", "how many peers the node is seeing"),
    ("sigs_count", "how many nodes are currently validating"),
    ("header_nextValidators_count", "how many nodes are in the validators set for the next epoch"),
    ("header_nextValidators_is_included", "if the node is included in the next validators set"),
    ("header_nextValidators_stake_min", "the smallest amount of total stake in the validators set for the next epoch"),
    ("header_nextValidators_stake_max", "the biggest amount of total stake in the validators set for the next epoch"),
    # /node/validator
    ("validator_stakes_count", "the number of delegators currently staking on the node"),
    # NOTE: "validaator" typo kept — the description is an exported string.
    ("validator_registered", "if the node is registered as validaator node"),
    ("validator_totalStake", "the total amount staked on the node"),
]
# Metric names exported verbatim as their own description (raw counters,
# mostly from /system/info).  Non-numeric endpoints (validator_name,
# validator_url, /system/peers, /system/epochproof header fields, version
# display strings, info_counters_time_since, ...) were already disabled in
# the original list and remain excluded here.
_SELF_NAMED_METRICS = [
    "info_counters_ledger_bft_commands_processed",
    "info_counters_ledger_sync_commands_processed",
    "info_counters_pacemaker_view",
    "info_counters_mempool_maxcount",
    "info_counters_mempool_relayer_sent_count",
    "info_counters_mempool_count",
    "info_counters_mempool_add_success",
    "info_counters_mempool_errors_other",
    "info_counters_mempool_errors_hook",
    "info_counters_mempool_errors_conflict",
    "info_counters_mempool_proposed_transaction",
    "info_counters_radix_engine_invalid_proposed_commands",
    "info_counters_radix_engine_system_transactions",
    "info_counters_radix_engine_user_transactions",
    "info_counters_count_bdb_ledger_contains_tx",
    "info_counters_count_bdb_ledger_deletes",
    "info_counters_count_bdb_ledger_commit",
    "info_counters_count_bdb_ledger_save",
    "info_counters_count_bdb_ledger_last_vertex",
    "info_counters_count_bdb_ledger_store",
    "info_counters_count_bdb_ledger_create_tx",
    "info_counters_count_bdb_ledger_contains",
    "info_counters_count_bdb_ledger_entries",
    "info_counters_count_bdb_ledger_get_last",
    "info_counters_count_bdb_ledger_get_next",
    "info_counters_count_bdb_ledger_search",
    "info_counters_count_bdb_ledger_total",
    "info_counters_count_bdb_ledger_get_prev",
    "info_counters_count_bdb_ledger_bytes_read",
    "info_counters_count_bdb_ledger_bytes_write",
    "info_counters_count_bdb_ledger_proofs_removed",
    "info_counters_count_bdb_ledger_proofs_added",
    "info_counters_count_bdb_ledger_get",
    "info_counters_count_bdb_ledger_last_committed",
    "info_counters_count_bdb_ledger_get_first",
    "info_counters_count_bdb_header_bytes_write",
    "info_counters_count_bdb_address_book_deletes",
    "info_counters_count_bdb_address_book_total",
    "info_counters_count_bdb_address_book_bytes_read",
    "info_counters_count_bdb_address_book_bytes_write",
    "info_counters_count_bdb_safety_state_total",
    "info_counters_count_bdb_safety_state_bytes_read",
    "info_counters_count_bdb_safety_state_bytes_write",
    "info_counters_count_apidb_balance_total",
    "info_counters_count_apidb_balance_read",
    "info_counters_count_apidb_balance_bytes_read",
    "info_counters_count_apidb_balance_bytes_write",
    "info_counters_count_apidb_balance_write",
    "info_counters_count_apidb_flush_count",
    "info_counters_count_apidb_queue_size",
    "info_counters_count_apidb_transaction_total",
    "info_counters_count_apidb_transaction_read",
    "info_counters_count_apidb_transaction_bytes_read",
    "info_counters_count_apidb_transaction_bytes_write",
    "info_counters_count_apidb_transaction_write",
    "info_counters_count_apidb_token_total",
    "info_counters_count_apidb_token_read",
    "info_counters_count_apidb_token_bytes_read",
    "info_counters_count_apidb_token_bytes_write",
    "info_counters_count_apidb_token_write",
    "info_counters_epoch_manager_queued_consensus_events",
    "info_counters_hashed_bytes",
    "info_counters_networking_received_bytes",
    "info_counters_networking_tcp_out_opened",
    "info_counters_networking_tcp_dropped_messages",
    "info_counters_networking_tcp_in_opened",
    "info_counters_networking_tcp_closed",
    "info_counters_networking_udp_dropped_messages",
    "info_counters_networking_sent_bytes",
    "info_counters_sync_processed",
    "info_counters_sync_target_state_version",
    "info_counters_sync_remote_requests_processed",
    "info_counters_sync_invalid_commands_received",
    "info_counters_sync_last_read_millis",
    "info_counters_sync_target_current_diff",
    "info_counters_signatures_verified",
    "info_counters_signatures_signed",
    "info_counters_elapsed_bdb_ledger_contains_tx",
    "info_counters_elapsed_bdb_ledger_commit",
    "info_counters_elapsed_bdb_ledger_save",
    "info_counters_elapsed_bdb_ledger_last_vertex",
    "info_counters_elapsed_bdb_ledger_store",
    "info_counters_elapsed_bdb_ledger_create_tx",
    "info_counters_elapsed_bdb_ledger_contains",
    "info_counters_elapsed_bdb_ledger_entries",
    "info_counters_elapsed_bdb_ledger_get_last",
    "info_counters_elapsed_bdb_ledger_search",
    "info_counters_elapsed_bdb_ledger_total",
    "info_counters_elapsed_bdb_ledger_get",
    "info_counters_elapsed_bdb_ledger_last_committed",
    "info_counters_elapsed_bdb_ledger_get_first",
    "info_counters_elapsed_bdb_address_book",
    "info_counters_elapsed_bdb_safety_state",
    "info_counters_elapsed_apidb_balance_read",
    "info_counters_elapsed_apidb_balance_write",
    "info_counters_elapsed_apidb_flush_time",
    "info_counters_elapsed_apidb_transaction_read",
    "info_counters_elapsed_apidb_transaction_write",
    "info_counters_elapsed_apidb_token_read",
    "info_counters_elapsed_apidb_token_write",
    "info_counters_bft_state_version",
    "info_counters_bft_vote_quorums",
    "info_counters_bft_rejected",
    "info_counters_bft_vertex_store_rebuilds",
    "info_counters_bft_vertex_store_forks",
    "info_counters_bft_sync_request_timeouts",
    "info_counters_bft_sync_requests_sent",
    "info_counters_bft_timeout",
    "info_counters_bft_vertex_store_size",
    "info_counters_bft_processed",
    "info_counters_bft_consensus_events",
    "info_counters_bft_indirect_parent",
    "info_counters_bft_proposals_made",
    "info_counters_bft_timed_out_views",
    "info_counters_bft_timeout_quorums",
    "info_counters_startup_time_ms",
    "info_counters_messages_inbound_processed",
    "info_counters_messages_inbound_discarded",
    "info_counters_messages_inbound_badsignature",
    "info_counters_messages_inbound_received",
    "info_counters_messages_outbound_processed",
    "info_counters_messages_outbound_aborted",
    "info_counters_messages_outbound_pending",
    "info_counters_messages_outbound_sent",
    "info_counters_persistence_safety_store_saves",
    "info_counters_persistence_vertex_store_saves",
    "info_counters_persistence_atom_log_write_bytes",
    "info_counters_persistence_atom_log_write_compressed",
    "info_counters_time_duration",
    "info_configuration_pacemakerRate",
    "info_configuration_pacemakerTimeout",
    "info_configuration_pacemakerMaxExponent",
    "info_system_version_system_version_agent_version",
    "info_system_version_system_version_protocol_version",
    "info_epochManager_currentView_view",
    "info_epochManager_currentView_epoch",
]
metrics = _DESCRIBED_METRICS + [(name, name) for name in _SELF_NAMED_METRICS]
|
metrics = [('up', 'the status of the node (1=running, 0=down, -1=connection issue, -2=node error)'), ('peers_count', 'how many peers the node is seeing'), ('sigs_count', 'how many nodes are currently validating'), ('header_nextValidators_count', 'how many nodes are in the validators set for the next epoch'), ('header_nextValidators_is_included', 'if the node is included in the next validators set'), ('header_nextValidators_stake_min', 'the smallest amount of total stake in the validators set for the next epoch'), ('header_nextValidators_stake_max', 'the biggest amount of total stake in the validators set for the next epoch'), ('validator_stakes_count', 'the number of delegators currently staking on the node'), ('validator_registered', 'if the node is registered as validaator node'), ('validator_totalStake', 'the total amount staked on the node'), ('info_counters_ledger_bft_commands_processed', 'info_counters_ledger_bft_commands_processed'), ('info_counters_ledger_sync_commands_processed', 'info_counters_ledger_sync_commands_processed'), ('info_counters_pacemaker_view', 'info_counters_pacemaker_view'), ('info_counters_mempool_maxcount', 'info_counters_mempool_maxcount'), ('info_counters_mempool_relayer_sent_count', 'info_counters_mempool_relayer_sent_count'), ('info_counters_mempool_count', 'info_counters_mempool_count'), ('info_counters_mempool_add_success', 'info_counters_mempool_add_success'), ('info_counters_mempool_errors_other', 'info_counters_mempool_errors_other'), ('info_counters_mempool_errors_hook', 'info_counters_mempool_errors_hook'), ('info_counters_mempool_errors_conflict', 'info_counters_mempool_errors_conflict'), ('info_counters_mempool_proposed_transaction', 'info_counters_mempool_proposed_transaction'), ('info_counters_radix_engine_invalid_proposed_commands', 'info_counters_radix_engine_invalid_proposed_commands'), ('info_counters_radix_engine_system_transactions', 'info_counters_radix_engine_system_transactions'), 
('info_counters_radix_engine_user_transactions', 'info_counters_radix_engine_user_transactions'), ('info_counters_count_bdb_ledger_contains_tx', 'info_counters_count_bdb_ledger_contains_tx'), ('info_counters_count_bdb_ledger_deletes', 'info_counters_count_bdb_ledger_deletes'), ('info_counters_count_bdb_ledger_commit', 'info_counters_count_bdb_ledger_commit'), ('info_counters_count_bdb_ledger_save', 'info_counters_count_bdb_ledger_save'), ('info_counters_count_bdb_ledger_last_vertex', 'info_counters_count_bdb_ledger_last_vertex'), ('info_counters_count_bdb_ledger_store', 'info_counters_count_bdb_ledger_store'), ('info_counters_count_bdb_ledger_create_tx', 'info_counters_count_bdb_ledger_create_tx'), ('info_counters_count_bdb_ledger_contains', 'info_counters_count_bdb_ledger_contains'), ('info_counters_count_bdb_ledger_entries', 'info_counters_count_bdb_ledger_entries'), ('info_counters_count_bdb_ledger_get_last', 'info_counters_count_bdb_ledger_get_last'), ('info_counters_count_bdb_ledger_get_next', 'info_counters_count_bdb_ledger_get_next'), ('info_counters_count_bdb_ledger_search', 'info_counters_count_bdb_ledger_search'), ('info_counters_count_bdb_ledger_total', 'info_counters_count_bdb_ledger_total'), ('info_counters_count_bdb_ledger_get_prev', 'info_counters_count_bdb_ledger_get_prev'), ('info_counters_count_bdb_ledger_bytes_read', 'info_counters_count_bdb_ledger_bytes_read'), ('info_counters_count_bdb_ledger_bytes_write', 'info_counters_count_bdb_ledger_bytes_write'), ('info_counters_count_bdb_ledger_proofs_removed', 'info_counters_count_bdb_ledger_proofs_removed'), ('info_counters_count_bdb_ledger_proofs_added', 'info_counters_count_bdb_ledger_proofs_added'), ('info_counters_count_bdb_ledger_get', 'info_counters_count_bdb_ledger_get'), ('info_counters_count_bdb_ledger_last_committed', 'info_counters_count_bdb_ledger_last_committed'), ('info_counters_count_bdb_ledger_get_first', 'info_counters_count_bdb_ledger_get_first'), 
('info_counters_count_bdb_header_bytes_write', 'info_counters_count_bdb_header_bytes_write'), ('info_counters_count_bdb_address_book_deletes', 'info_counters_count_bdb_address_book_deletes'), ('info_counters_count_bdb_address_book_total', 'info_counters_count_bdb_address_book_total'), ('info_counters_count_bdb_address_book_bytes_read', 'info_counters_count_bdb_address_book_bytes_read'), ('info_counters_count_bdb_address_book_bytes_write', 'info_counters_count_bdb_address_book_bytes_write'), ('info_counters_count_bdb_safety_state_total', 'info_counters_count_bdb_safety_state_total'), ('info_counters_count_bdb_safety_state_bytes_read', 'info_counters_count_bdb_safety_state_bytes_read'), ('info_counters_count_bdb_safety_state_bytes_write', 'info_counters_count_bdb_safety_state_bytes_write'), ('info_counters_count_apidb_balance_total', 'info_counters_count_apidb_balance_total'), ('info_counters_count_apidb_balance_read', 'info_counters_count_apidb_balance_read'), ('info_counters_count_apidb_balance_bytes_read', 'info_counters_count_apidb_balance_bytes_read'), ('info_counters_count_apidb_balance_bytes_write', 'info_counters_count_apidb_balance_bytes_write'), ('info_counters_count_apidb_balance_write', 'info_counters_count_apidb_balance_write'), ('info_counters_count_apidb_flush_count', 'info_counters_count_apidb_flush_count'), ('info_counters_count_apidb_queue_size', 'info_counters_count_apidb_queue_size'), ('info_counters_count_apidb_transaction_total', 'info_counters_count_apidb_transaction_total'), ('info_counters_count_apidb_transaction_read', 'info_counters_count_apidb_transaction_read'), ('info_counters_count_apidb_transaction_bytes_read', 'info_counters_count_apidb_transaction_bytes_read'), ('info_counters_count_apidb_transaction_bytes_write', 'info_counters_count_apidb_transaction_bytes_write'), ('info_counters_count_apidb_transaction_write', 'info_counters_count_apidb_transaction_write'), ('info_counters_count_apidb_token_total', 
'info_counters_count_apidb_token_total'), ('info_counters_count_apidb_token_read', 'info_counters_count_apidb_token_read'), ('info_counters_count_apidb_token_bytes_read', 'info_counters_count_apidb_token_bytes_read'), ('info_counters_count_apidb_token_bytes_write', 'info_counters_count_apidb_token_bytes_write'), ('info_counters_count_apidb_token_write', 'info_counters_count_apidb_token_write'), ('info_counters_epoch_manager_queued_consensus_events', 'info_counters_epoch_manager_queued_consensus_events'), ('info_counters_hashed_bytes', 'info_counters_hashed_bytes'), ('info_counters_networking_received_bytes', 'info_counters_networking_received_bytes'), ('info_counters_networking_tcp_out_opened', 'info_counters_networking_tcp_out_opened'), ('info_counters_networking_tcp_dropped_messages', 'info_counters_networking_tcp_dropped_messages'), ('info_counters_networking_tcp_in_opened', 'info_counters_networking_tcp_in_opened'), ('info_counters_networking_tcp_closed', 'info_counters_networking_tcp_closed'), ('info_counters_networking_udp_dropped_messages', 'info_counters_networking_udp_dropped_messages'), ('info_counters_networking_sent_bytes', 'info_counters_networking_sent_bytes'), ('info_counters_sync_processed', 'info_counters_sync_processed'), ('info_counters_sync_target_state_version', 'info_counters_sync_target_state_version'), ('info_counters_sync_remote_requests_processed', 'info_counters_sync_remote_requests_processed'), ('info_counters_sync_invalid_commands_received', 'info_counters_sync_invalid_commands_received'), ('info_counters_sync_last_read_millis', 'info_counters_sync_last_read_millis'), ('info_counters_sync_target_current_diff', 'info_counters_sync_target_current_diff'), ('info_counters_signatures_verified', 'info_counters_signatures_verified'), ('info_counters_signatures_signed', 'info_counters_signatures_signed'), ('info_counters_elapsed_bdb_ledger_contains_tx', 'info_counters_elapsed_bdb_ledger_contains_tx'), ('info_counters_elapsed_bdb_ledger_commit', 
'info_counters_elapsed_bdb_ledger_commit'), ('info_counters_elapsed_bdb_ledger_save', 'info_counters_elapsed_bdb_ledger_save'), ('info_counters_elapsed_bdb_ledger_last_vertex', 'info_counters_elapsed_bdb_ledger_last_vertex'), ('info_counters_elapsed_bdb_ledger_store', 'info_counters_elapsed_bdb_ledger_store'), ('info_counters_elapsed_bdb_ledger_create_tx', 'info_counters_elapsed_bdb_ledger_create_tx'), ('info_counters_elapsed_bdb_ledger_contains', 'info_counters_elapsed_bdb_ledger_contains'), ('info_counters_elapsed_bdb_ledger_entries', 'info_counters_elapsed_bdb_ledger_entries'), ('info_counters_elapsed_bdb_ledger_get_last', 'info_counters_elapsed_bdb_ledger_get_last'), ('info_counters_elapsed_bdb_ledger_search', 'info_counters_elapsed_bdb_ledger_search'), ('info_counters_elapsed_bdb_ledger_total', 'info_counters_elapsed_bdb_ledger_total'), ('info_counters_elapsed_bdb_ledger_get', 'info_counters_elapsed_bdb_ledger_get'), ('info_counters_elapsed_bdb_ledger_last_committed', 'info_counters_elapsed_bdb_ledger_last_committed'), ('info_counters_elapsed_bdb_ledger_get_first', 'info_counters_elapsed_bdb_ledger_get_first'), ('info_counters_elapsed_bdb_address_book', 'info_counters_elapsed_bdb_address_book'), ('info_counters_elapsed_bdb_safety_state', 'info_counters_elapsed_bdb_safety_state'), ('info_counters_elapsed_apidb_balance_read', 'info_counters_elapsed_apidb_balance_read'), ('info_counters_elapsed_apidb_balance_write', 'info_counters_elapsed_apidb_balance_write'), ('info_counters_elapsed_apidb_flush_time', 'info_counters_elapsed_apidb_flush_time'), ('info_counters_elapsed_apidb_transaction_read', 'info_counters_elapsed_apidb_transaction_read'), ('info_counters_elapsed_apidb_transaction_write', 'info_counters_elapsed_apidb_transaction_write'), ('info_counters_elapsed_apidb_token_read', 'info_counters_elapsed_apidb_token_read'), ('info_counters_elapsed_apidb_token_write', 'info_counters_elapsed_apidb_token_write'), ('info_counters_bft_state_version', 
'info_counters_bft_state_version'), ('info_counters_bft_vote_quorums', 'info_counters_bft_vote_quorums'), ('info_counters_bft_rejected', 'info_counters_bft_rejected'), ('info_counters_bft_vertex_store_rebuilds', 'info_counters_bft_vertex_store_rebuilds'), ('info_counters_bft_vertex_store_forks', 'info_counters_bft_vertex_store_forks'), ('info_counters_bft_sync_request_timeouts', 'info_counters_bft_sync_request_timeouts'), ('info_counters_bft_sync_requests_sent', 'info_counters_bft_sync_requests_sent'), ('info_counters_bft_timeout', 'info_counters_bft_timeout'), ('info_counters_bft_vertex_store_size', 'info_counters_bft_vertex_store_size'), ('info_counters_bft_processed', 'info_counters_bft_processed'), ('info_counters_bft_consensus_events', 'info_counters_bft_consensus_events'), ('info_counters_bft_indirect_parent', 'info_counters_bft_indirect_parent'), ('info_counters_bft_proposals_made', 'info_counters_bft_proposals_made'), ('info_counters_bft_timed_out_views', 'info_counters_bft_timed_out_views'), ('info_counters_bft_timeout_quorums', 'info_counters_bft_timeout_quorums'), ('info_counters_startup_time_ms', 'info_counters_startup_time_ms'), ('info_counters_messages_inbound_processed', 'info_counters_messages_inbound_processed'), ('info_counters_messages_inbound_discarded', 'info_counters_messages_inbound_discarded'), ('info_counters_messages_inbound_badsignature', 'info_counters_messages_inbound_badsignature'), ('info_counters_messages_inbound_received', 'info_counters_messages_inbound_received'), ('info_counters_messages_outbound_processed', 'info_counters_messages_outbound_processed'), ('info_counters_messages_outbound_aborted', 'info_counters_messages_outbound_aborted'), ('info_counters_messages_outbound_pending', 'info_counters_messages_outbound_pending'), ('info_counters_messages_outbound_sent', 'info_counters_messages_outbound_sent'), ('info_counters_persistence_safety_store_saves', 'info_counters_persistence_safety_store_saves'), 
('info_counters_persistence_vertex_store_saves', 'info_counters_persistence_vertex_store_saves'), ('info_counters_persistence_atom_log_write_bytes', 'info_counters_persistence_atom_log_write_bytes'), ('info_counters_persistence_atom_log_write_compressed', 'info_counters_persistence_atom_log_write_compressed'), ('info_counters_time_duration', 'info_counters_time_duration'), ('info_configuration_pacemakerRate', 'info_configuration_pacemakerRate'), ('info_configuration_pacemakerTimeout', 'info_configuration_pacemakerTimeout'), ('info_configuration_pacemakerMaxExponent', 'info_configuration_pacemakerMaxExponent'), ('info_system_version_system_version_agent_version', 'info_system_version_system_version_agent_version'), ('info_system_version_system_version_protocol_version', 'info_system_version_system_version_protocol_version'), ('info_epochManager_currentView_view', 'info_epochManager_currentView_view'), ('info_epochManager_currentView_epoch', 'info_epochManager_currentView_epoch')]
|
"""
Reference: https://leetcode.com/problems/strange-printer/discuss/106810/Java-O(n3)-DP-Solution-with-Explanation-and-Simple-Optimization
"""
class Solution:
    def strangePrinter(self, s: str) -> int:
        """Return the minimum number of turns a strange printer needs to print s.

        Interval DP: dp[left][right] is the fewest turns needed to print the
        substring s[left..right] (inclusive).
        """
        n = len(s)
        if n == 0:
            return 0
        # Seed with the trivial upper bound: one turn per character.
        dp = [[n] * n for _ in range(n)]
        for i in range(n):
            dp[i][i] = 1  # a single character always takes exactly one turn
        # O(n^3): grow the right edge, shrink the left edge, try every split.
        for right in range(n):
            for left in range(right, -1, -1):
                for split in range(left, right):
                    cost = dp[left][split] + dp[split + 1][right]
                    # If the split character matches the right edge, the two
                    # print runs can be merged, saving one turn.
                    if s[split] == s[right]:
                        cost -= 1
                    dp[left][right] = min(dp[left][right], cost)
        return dp[0][n - 1]
|
"""
Reference: https://leetcode.com/problems/strange-printer/discuss/106810/Java-O(n3)-DP-Solution-with-Explanation-and-Simple-Optimization
"""
class Solution:
    def strange_printer(self, s: str) -> int:
        """Minimum number of printer turns required to produce ``s`` (interval DP)."""
        length = len(s)
        if not s:
            return 0
        # table[i][j]: fewest turns for s[i..j]; seeded with the trivial
        # upper bound of one turn per character.
        table = [[length] * length for _ in range(length)]
        for idx in range(length):
            table[idx][idx] = 1
        for j in range(length):
            for i in range(j, -1, -1):
                for k in range(i, j):
                    candidate = table[i][k] + table[k + 1][j]
                    if s[k] == s[j]:
                        # Matching edge characters let two runs share a turn.
                        candidate -= 1
                    if candidate < table[i][j]:
                        table[i][j] = candidate
        return table[0][length - 1]
|
def test_slash_request_forbidden(client):
    # The bare root path must not be routed.
    response = client.get("/")
    assert response.status_code == 404
def test_api_root_request_forbidden(client):
    # Neither form of the API root (with or without trailing slash) is served.
    for path in ("/api", "/api/"):
        assert client.get(path).status_code == 404
def test_auth_root_request_forbidden(client):
    # Neither form of the auth root (with or without trailing slash) is served.
    for path in ("/auth", "/auth/"):
        assert client.get(path).status_code == 404
|
def _assert_not_found(client, path):
    # Shared check: the given path must resolve to HTTP 404.
    assert client.get(path).status_code == 404
def test_slash_request_forbidden(client):
    _assert_not_found(client, '/')
def test_api_root_request_forbidden(client):
    _assert_not_found(client, '/api')
    _assert_not_found(client, '/api/')
def test_auth_root_request_forbidden(client):
    _assert_not_found(client, '/auth')
    _assert_not_found(client, '/auth/')
|
def search_staff():
    """Return the GraphQL query string used to look up a media item's staff.

    The query takes ``$id``, ``$search`` and ``$type`` variables and requests
    the media's identifiers, its romaji/english titles, and for each staff
    member their occupations, site URL, image and full name.
    (Presumably aimed at the AniList GraphQL API — TODO confirm against caller.)
    """
    query = """
    query ($id: Int, $search: String, $type: MediaType) {
    Media(search: $search, id: $id, type: $type) {
        id
        idMal
        type
        title {
        romaji
        english
        }
        staff {
        edges {
            node {
            primaryOccupations
            siteUrl
            image {
                large
            }
            name {
                full
            }
            }
        }
        }
    }
    }
    """
    return query
|
def search_staff():
    """Return the GraphQL query string used to look up a media item's staff.

    Single-line (escaped-newline) form of the staff query: it takes ``$id``,
    ``$search`` and ``$type`` variables and requests the media's identifiers,
    titles, and each staff member's occupations, site URL, image and name.
    """
    query = '\n    query ($id: Int, $search: String, $type: MediaType) {\n    Media(search: $search, id: $id, type: $type) {\n        id\n        idMal\n        type\n        title {\n        romaji\n        english\n        }\n        staff {\n        edges {\n            node {\n            primaryOccupations\n            siteUrl\n            image {\n                large\n            }\n            name {\n                full\n            }\n            }\n        }\n        }\n    }\n    }\n    '
    return query
|
# Read three integers; answer "Yes" exactly when two of them are equal and the
# third differs — i.e. when the values collapse to exactly two distinct numbers.
a, b, c = map(int, input().split())
print("Yes" if len({a, b, c}) == 2 else "No")
|
# "Yes" iff exactly two of the three integers coincide: that is the case
# precisely when the set of values has exactly two distinct elements.
(a, b, c) = map(int, input().split())
distinct = {a, b, c}
if len(distinct) == 2:
    print('Yes')
else:
    print('No')
|
#Check for the existence of file
# Bootstrapping: count the items already stored in the todo file; if the file
# cannot be opened, create it fresh with a two-line header instead.
no_of_items=0
try:
    f=open("./TODO (CLI-VER)/todolist.txt")
    p=0
    for i in f.readlines():#Counting the number of items if the file exists already
        p+=1
    # The first two lines are the header, so items = total lines - 2.
    no_of_items=p-2
except:
    # NOTE(review): bare except also swallows unrelated errors, not just a
    # missing file; the file is (re)created with its header in that case.
    f=open("./TODO (CLI-VER)/todolist.txt",'w')
    f.write("_________TODO LIST__________\n")
    f.write("    TIME     WORK")
finally:
    f.close()
#Todo list
# Main menu loop: 1 = add, 2 = delete, 3 = update, 4 = display; any other
# number (e.g. 5) leaves the loop and exits the program.
print("Press 1: Add Item \nPress 2: Delete Item \nPress 3: Update item \nPress 4: Display Items\nPress 5: Exit")
n=int(input())
while n==1 or n==2 or n==3 or n==4:
    if n==1:
        # Add: append a new "position time work" line at the end of the file.
        todo=[]
        print("Enter the time in HH:MM format(24 hours format)")
        time=input()
        print("Enter your Work")
        work=input()
        no_of_items+=1
        with open('./TODO (CLI-VER)/todolist.txt','a') as f:
            f.write("\n"+str(no_of_items)+"  "+time+"   "+work)
    elif n==2:
        # Delete: show the current list, remove the chosen line and rewrite the
        # file, renumbering every line after the deleted one.
        if(no_of_items<=0):
            print("There is no item in the list kindly add some items")
        else:
            print("____________________________________________________________")
            print("Your Current List: ")
            todo=[]
            with open('./TODO (CLI-VER)/todolist.txt') as f:
                for i in f.readlines():
                    print(i)
                    todo.append(i)
            print("____________________________________________________________")
            print("Enter the position of the item you want to delete : ")
            pos=int(input())
            if(pos<=0):
                print("Please enter a valid position")
            elif (pos>(no_of_items)):
                print("Please enter the position <= {}".format(no_of_items))
            else:
                # Item at position 'pos' lives at list index pos+1 because of
                # the two header lines read into 'todo'.
                todo.pop(pos+1)
                no_of_items-=1
                if(no_of_items<=0):
                    print("Congratulations your todo list is empty!")
                with open('./TODO (CLI-VER)/todolist.txt','w') as f:
                    for i in range(len(todo)):
                        if i>=(pos+1):
                            # Renumber the shifted items. NOTE(review): stripping
                            # exactly one char (todo[i][1:]) assumes single-digit
                            # position prefixes — TODO confirm for >9 items.
                            f.write(str(pos)+todo[i][1:])
                            pos+=1
                        else:
                            f.write(todo[i])
    elif n==3:
        # Update: replace either the time or the work text of one item; the
        # ':' inside the stored HH:MM time is used to locate the fields.
        print("____________________________________________________________")
        print("Your Current List: ")
        todo=[]
        with open('./TODO (CLI-VER)/todolist.txt') as f:
            for i in f.readlines():
                print(i)
                todo.append(i)
        print("____________________________________________________________")
        print("Enter the position of the items you want to update : ")
        pos=int(input())
        if(pos<=0):
            print("Please enter a valid position")
        elif (pos>(no_of_items)):
            print("Please enter the position <= {}".format(no_of_items))
        else:
            print("What you want to update : ")
            print("Press 1: Time\nPress 2: Work")
            choice=int(input())
            if choice==1:
                print("Enter your updated time :")
                time=input()
                # p = index of the ':' inside the stored HH:MM time of this line.
                p=todo[pos+1].index(":")
                y=0
                with open('./TODO (CLI-VER)/todolist.txt','w') as f:
                    for i in range(len(todo)):
                        if i==pos+1:
                            # Keep everything after the old time (p+3 skips ':MM').
                            f.write(str(pos)+"  "+time+""+''.join(todo[pos+1][p+3:]))
                        else:
                            f.write(todo[i])
            elif choice==2:
                print("Enter your updated work :")
                work=input()
                p=todo[pos+1].index(":")
                y=0
                with open('./TODO (CLI-VER)/todolist.txt','w') as f:
                    for i in range(len(todo)):
                        if i==pos+1:
                            # Keep the old HH:MM slice (p-2..p+2), swap the work text.
                            f.write(str(pos)+"  "+''.join(todo[pos+1][p-2:p+3])+"   "+work)
                        else:
                            f.write(todo[i])
    elif n==4:
        # Display: dump the whole file to the console.
        print("Your Current List: ")
        todo=[]
        print("____________________________________________________________")
        with open('./TODO (CLI-VER)/todolist.txt') as f:
            for i in f.readlines():
                print(i)
                todo.append(i)
        print("____________________________________________________________")
    print("Press 1: Add Item \nPress 2: Delete the Item\nPress 3: Update item\nPress 4:Display Items\nPress 5:Exit")
    n=int(input())
print("Thank you for using our application")
|
no_of_items = 0
# Bootstrapping: count the items already stored in the todo file; if the file
# cannot be opened, create it fresh with a two-line header instead.
try:
    f = open('./TODO (CLI-VER)/todolist.txt')
    p = 0
    for i in f.readlines():
        p += 1
    # The first two lines are the header, so items = total lines - 2.
    no_of_items = p - 2
except:
    # NOTE(review): bare except also swallows unrelated errors, not just a
    # missing file; the file is (re)created with its header in that case.
    f = open('./TODO (CLI-VER)/todolist.txt', 'w')
    f.write('_________TODO LIST__________\n')
    f.write('    TIME     WORK')
finally:
    f.close()
# Main menu loop: 1 = add, 2 = delete, 3 = update, 4 = display; any other
# number (e.g. 5) leaves the loop and exits the program.
print('Press 1: Add Item \nPress 2: Delete Item \nPress 3: Update item \nPress 4: Display Items\nPress 5: Exit')
n = int(input())
while n == 1 or n == 2 or n == 3 or (n == 4):
    if n == 1:
        # Add: append a new "position time work" line at the end of the file.
        todo = []
        print('Enter the time in HH:MM format(24 hours format)')
        time = input()
        print('Enter your Work')
        work = input()
        no_of_items += 1
        with open('./TODO (CLI-VER)/todolist.txt', 'a') as f:
            f.write('\n' + str(no_of_items) + '  ' + time + '   ' + work)
    elif n == 2:
        # Delete: show the current list, remove the chosen line and rewrite the
        # file, renumbering every line after the deleted one.
        if no_of_items <= 0:
            print('There is no item in the list kindly add some items')
        else:
            print('____________________________________________________________')
            print('Your Current List: ')
            todo = []
            with open('./TODO (CLI-VER)/todolist.txt') as f:
                for i in f.readlines():
                    print(i)
                    todo.append(i)
            print('____________________________________________________________')
            print('Enter the position of the item you want to delete : ')
            pos = int(input())
            if pos <= 0:
                print('Please enter a valid position')
            elif pos > no_of_items:
                print('Please enter the position <= {}'.format(no_of_items))
            else:
                # Item at position 'pos' lives at list index pos+1 because of
                # the two header lines read into 'todo'.
                todo.pop(pos + 1)
                no_of_items -= 1
                if no_of_items <= 0:
                    print('Congratulations your todo list is empty!')
                with open('./TODO (CLI-VER)/todolist.txt', 'w') as f:
                    for i in range(len(todo)):
                        if i >= pos + 1:
                            # Renumber the shifted items. NOTE(review): stripping
                            # exactly one char (todo[i][1:]) assumes single-digit
                            # position prefixes — TODO confirm for >9 items.
                            f.write(str(pos) + todo[i][1:])
                            pos += 1
                        else:
                            f.write(todo[i])
    elif n == 3:
        # Update: replace either the time or the work text of one item; the
        # ':' inside the stored HH:MM time is used to locate the fields.
        print('____________________________________________________________')
        print('Your Current List: ')
        todo = []
        with open('./TODO (CLI-VER)/todolist.txt') as f:
            for i in f.readlines():
                print(i)
                todo.append(i)
        print('____________________________________________________________')
        print('Enter the position of the items you want to update : ')
        pos = int(input())
        if pos <= 0:
            print('Please enter a valid position')
        elif pos > no_of_items:
            print('Please enter the position <= {}'.format(no_of_items))
        else:
            print('What you want to update : ')
            print('Press 1: Time\nPress 2: Work')
            choice = int(input())
            if choice == 1:
                print('Enter your updated time :')
                time = input()
                # p = index of the ':' inside the stored HH:MM time of this line.
                p = todo[pos + 1].index(':')
                y = 0
                with open('./TODO (CLI-VER)/todolist.txt', 'w') as f:
                    for i in range(len(todo)):
                        if i == pos + 1:
                            # Keep everything after the old time (p+3 skips ':MM').
                            f.write(str(pos) + '  ' + time + '' + ''.join(todo[pos + 1][p + 3:]))
                        else:
                            f.write(todo[i])
            elif choice == 2:
                print('Enter your updated work :')
                work = input()
                p = todo[pos + 1].index(':')
                y = 0
                with open('./TODO (CLI-VER)/todolist.txt', 'w') as f:
                    for i in range(len(todo)):
                        if i == pos + 1:
                            # Keep the old HH:MM slice (p-2..p+2), swap the work text.
                            f.write(str(pos) + '  ' + ''.join(todo[pos + 1][p - 2:p + 3]) + '   ' + work)
                        else:
                            f.write(todo[i])
    elif n == 4:
        # Display: dump the whole file to the console.
        print('Your Current List: ')
        todo = []
        print('____________________________________________________________')
        with open('./TODO (CLI-VER)/todolist.txt') as f:
            for i in f.readlines():
                print(i)
                todo.append(i)
        print('____________________________________________________________')
    print('Press 1: Add Item \nPress 2: Delete the Item\nPress 3: Update item\nPress 4:Display Items\nPress 5:Exit')
    n = int(input())
print('Thank you for using our application')
|
class Solution:
    def minFlips(self, target: str) -> int:
        """Minimum number of prefix flips turning an all-'0' string into target.

        Walk left to right tracking how many flips have been applied so far; a
        new flip is needed whenever the current character disagrees with the
        state produced by the flips already made.
        """
        flips = 0
        for ch in target:
            # After an odd number of prefix flips this position currently reads '1'.
            state = '1' if flips % 2 else '0'
            if ch != state:
                flips += 1
        return flips
|
class Solution:
    def min_flips(self, target: str) -> int:
        """Minimum number of prefix flips turning "00...0" into ``target``.

        Equivalent to counting the value transitions in target when the scan
        starts from the initial bit value '0': every transition forces exactly
        one new prefix flip.
        """
        count = 0
        previous = '0'  # the bit value the string currently shows at this point
        for bit in target:
            if bit != previous:
                count += 1
                previous = bit
        return count
|
"""
Leetcode #464
"""
class Solution:
    def canIWin(self, maxChoosableInteger: int, desiredTotal: int) -> bool:
        """Return True if the first player can force a win in the "100 game".

        Players alternately pick distinct integers from 1..maxChoosableInteger;
        the player whose pick makes the running sum reach desiredTotal wins.
        Implemented as memoized minimax over the tuple of remaining choices.
        """
        seen = {}
        def helper(choices, total):
            # If the largest remaining choice reaches the total, the player to
            # move wins immediately.
            if choices[-1] >= total:
                return True
            # Check if this subproblem is already solved.
            key = tuple(choices)
            if key in seen:
                return seen[key]
            for i in range(len(choices)):
                # If the opponent cannot win after we take choices[i], we win.
                if not helper(choices[:i] + choices[i+1:], total - choices[i]):
                    # BUG FIX: this result was previously cached as seen[i]
                    # (the loop index), so winning states were never memoized
                    # and integer keys polluted the table.
                    seen[key] = True
                    return True
            # Every available move lets the opponent win.
            seen[key] = False
            return False
        choices = list(range(1, maxChoosableInteger + 1))
        # Sum of consecutive integers 1..maxChoosableInteger.
        _sum = (maxChoosableInteger * (maxChoosableInteger + 1)) // 2
        if _sum < desiredTotal:
            # Even using every number cannot reach the total: nobody can win.
            return False
        # If the exact sum is needed, the player making the last pick wins;
        # with an odd number of picks that is the first player.
        if _sum == desiredTotal and len(choices) % 2:
            return True
        return helper(choices, desiredTotal)
if __name__ == "__main__":
solution = Solution()
assert solution.canIWin(10, 11) == False
|
"""
Leetcode #464
"""
class Solution:
    def can_i_win(self, maxChoosableInteger: int, desiredTotal: int) -> bool:
        """Return True if the first player can force a win in the "100 game".

        Players alternately pick distinct integers from 1..maxChoosableInteger;
        whoever makes the running sum reach desiredTotal wins. Memoized minimax
        over the tuple of still-available choices.
        """
        seen = {}
        def helper(choices, total):
            # The player to move wins at once if any remaining choice covers
            # the outstanding total (choices is sorted, so check the largest).
            if choices[-1] >= total:
                return True
            key = tuple(choices)
            if key in seen:
                return seen[key]
            for i in range(len(choices)):
                # We win if some pick leaves the opponent without a winning reply.
                if not helper(choices[:i] + choices[i + 1:], total - choices[i]):
                    # BUG FIX: the winning result used to be stored as seen[i]
                    # (the loop index) instead of seen[key], so winning states
                    # were never found again in the memo table.
                    seen[key] = True
                    return True
            seen[key] = False
            return False
        choices = list(range(1, maxChoosableInteger + 1))
        _sum = maxChoosableInteger * (maxChoosableInteger + 1) // 2
        if _sum < desiredTotal:
            # Even using every number cannot reach the total: nobody can win.
            return False
        # Exact-sum game: the last pick wins, and with an odd number of picks
        # that pick belongs to the first player.
        if _sum == desiredTotal and len(choices) % 2:
            return True
        return helper(choices, desiredTotal)
if __name__ == '__main__':
    # BUG FIX: this guard previously did `solution = solution()` (NameError —
    # the class is `Solution`) and then called `canIWin`, which this variant
    # renamed to `can_i_win` (AttributeError). Both names are corrected here.
    solution = Solution()
    assert solution.can_i_win(10, 11) == False
|
class Solution:
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Return True if ``target`` occurs anywhere in ``matrix``.

        Idiom improvement: the hand-rolled double loop is replaced by any()
        over per-row membership tests; behavior (including the empty-matrix
        and empty-row cases) is unchanged.
        """
        return any(target in row for row in matrix)
|
class Solution:
    def search_matrix(self, matrix: List[List[int]], target: int) -> bool:
        """Linear scan: report whether ``target`` appears in any row of ``matrix``."""
        for row in matrix:
            # list membership uses the same == comparison as the original
            # element-by-element loop, with the same early exit on a hit.
            if target in row:
                return True
        return False
|
class FermiDataGetter(object):
    """Placeholder for a Fermi data getter; instantiation is not supported yet."""
    def __init__(self) -> None:
        # Deliberately unimplemented: constructing this class always raises.
        raise NotImplementedError()
|
class Fermidatagetter(object):
    """Placeholder for a Fermi data getter; instantiation is not supported yet."""
    def __init__(self) -> None:
        # BUG FIX: the original raised `not_implemented_error()`, an undefined
        # name, which produced a NameError instead of the intended
        # NotImplementedError.
        raise NotImplementedError()
|
# -*- coding: utf-8 -*-
'''
>>> from pycm import *
>>> import os
>>> import json
>>> y_actu = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]
>>> y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]
>>> cm = ConfusionMatrix(y_actu, y_pred)
>>> cm
pycm.ConfusionMatrix(classes: [0, 1, 2])
>>> len(cm)
3
>>> print(cm)
Predict 0 1 2
Actual
0 3 0 0
<BLANKLINE>
1 0 1 2
<BLANKLINE>
2 2 1 3
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.30439,0.86228)
AUNP 0.66667
AUNU 0.69444
Bennett S 0.375
CBA 0.47778
Chi-Squared 6.6
Chi-Squared DF 4
Conditional Entropy 0.95915
Cramer V 0.5244
Cross Entropy 1.59352
Gwet AC1 0.38931
Hamming Loss 0.41667
Joint Entropy 2.45915
KL Divergence 0.09352
Kappa 0.35484
Kappa 95% CI (-0.07708,0.78675)
Kappa No Prevalence 0.16667
Kappa Standard Error 0.22036
Kappa Unbiased 0.34426
Lambda A 0.16667
Lambda B 0.42857
Mutual Information 0.52421
NIR 0.5
Overall ACC 0.58333
Overall CEN 0.46381
Overall J (1.225,0.40833)
Overall MCC 0.36667
Overall MCEN 0.51894
Overall RACC 0.35417
Overall RACCU 0.36458
P-Value 0.38721
PPV Macro 0.56667
PPV Micro 0.58333
Phi-Squared 0.55
RCI 0.34947
RR 4.0
Reference Entropy 1.5
Response Entropy 1.48336
SOA1(Landis & Koch) Fair
SOA2(Fleiss) Poor
SOA3(Altman) Fair
SOA4(Cicchetti) Poor
Scott PI 0.34426
Standard Error 0.14232
TPR Macro 0.61111
TPR Micro 0.58333
Zero-one Loss 5
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 0 1 2
ACC(Accuracy) 0.83333 0.75 0.58333
AUC(Area under the roc curve) 0.88889 0.61111 0.58333
AUCI(Auc value interpretation) Very Good Fair Poor
BM(Informedness or bookmaker informedness) 0.77778 0.22222 0.16667
CEN(Confusion entropy) 0.25 0.49658 0.60442
DOR(Diagnostic odds ratio) None 4.0 2.0
DP(Discriminant power) None 0.33193 0.16597
DPI(Discriminant power interpretation) None Poor Poor
ERR(Error rate) 0.16667 0.25 0.41667
F0.5(F0.5 score) 0.65217 0.45455 0.57692
F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.4 0.54545
F2(F2 score) 0.88235 0.35714 0.51724
FDR(False discovery rate) 0.4 0.5 0.4
FN(False negative/miss/type 2 error) 0 2 3
FNR(Miss rate or false negative rate) 0.0 0.66667 0.5
FOR(False omission rate) 0.0 0.2 0.42857
FP(False positive/type 1 error/false alarm) 2 1 2
FPR(Fall-out or false positive rate) 0.22222 0.11111 0.33333
G(G-measure geometric mean of precision and sensitivity) 0.7746 0.40825 0.54772
GI(Gini index) 0.77778 0.22222 0.16667
IS(Information score) 1.26303 1.0 0.26303
J(Jaccard index) 0.6 0.25 0.375
LS(Lift score) 2.4 2.0 1.2
MCC(Matthews correlation coefficient) 0.68313 0.2582 0.16903
MCEN(Modified confusion entropy) 0.26439 0.5 0.6875
MK(Markedness) 0.6 0.3 0.17143
N(Condition negative) 9 9 6
NLR(Negative likelihood ratio) 0.0 0.75 0.75
NPV(Negative predictive value) 1.0 0.8 0.57143
P(Condition positive or support) 3 3 6
PLR(Positive likelihood ratio) 4.5 3.0 1.5
PLRI(Positive likelihood ratio interpretation) Poor Poor Poor
POP(Population) 12 12 12
PPV(Precision or positive predictive value) 0.6 0.5 0.6
PRE(Prevalence) 0.25 0.25 0.5
RACC(Random accuracy) 0.10417 0.04167 0.20833
RACCU(Random accuracy unbiased) 0.11111 0.0434 0.21007
TN(True negative/correct rejection) 7 8 4
TNR(Specificity or true negative rate) 0.77778 0.88889 0.66667
TON(Test outcome negative) 7 10 7
TOP(Test outcome positive) 5 2 5
TP(True positive/hit) 3 1 3
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5
Y(Youden index) 0.77778 0.22222 0.16667
dInd(Distance index) 0.22222 0.67586 0.60093
sInd(Similarity index) 0.84287 0.52209 0.57508
<BLANKLINE>
>>> cm.relabel({0:"L1",1:"L2",2:"L3"})
>>> print(cm)
Predict L1 L2 L3
Actual
L1 3 0 0
<BLANKLINE>
L2 0 1 2
<BLANKLINE>
L3 2 1 3
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.30439,0.86228)
AUNP 0.66667
AUNU 0.69444
Bennett S 0.375
CBA 0.47778
Chi-Squared 6.6
Chi-Squared DF 4
Conditional Entropy 0.95915
Cramer V 0.5244
Cross Entropy 1.59352
Gwet AC1 0.38931
Hamming Loss 0.41667
Joint Entropy 2.45915
KL Divergence 0.09352
Kappa 0.35484
Kappa 95% CI (-0.07708,0.78675)
Kappa No Prevalence 0.16667
Kappa Standard Error 0.22036
Kappa Unbiased 0.34426
Lambda A 0.16667
Lambda B 0.42857
Mutual Information 0.52421
NIR 0.5
Overall ACC 0.58333
Overall CEN 0.46381
Overall J (1.225,0.40833)
Overall MCC 0.36667
Overall MCEN 0.51894
Overall RACC 0.35417
Overall RACCU 0.36458
P-Value 0.38721
PPV Macro 0.56667
PPV Micro 0.58333
Phi-Squared 0.55
RCI 0.34947
RR 4.0
Reference Entropy 1.5
Response Entropy 1.48336
SOA1(Landis & Koch) Fair
SOA2(Fleiss) Poor
SOA3(Altman) Fair
SOA4(Cicchetti) Poor
Scott PI 0.34426
Standard Error 0.14232
TPR Macro 0.61111
TPR Micro 0.58333
Zero-one Loss 5
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes L1 L2 L3
ACC(Accuracy) 0.83333 0.75 0.58333
AUC(Area under the roc curve) 0.88889 0.61111 0.58333
AUCI(Auc value interpretation) Very Good Fair Poor
BM(Informedness or bookmaker informedness) 0.77778 0.22222 0.16667
CEN(Confusion entropy) 0.25 0.49658 0.60442
DOR(Diagnostic odds ratio) None 4.0 2.0
DP(Discriminant power) None 0.33193 0.16597
DPI(Discriminant power interpretation) None Poor Poor
ERR(Error rate) 0.16667 0.25 0.41667
F0.5(F0.5 score) 0.65217 0.45455 0.57692
F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.4 0.54545
F2(F2 score) 0.88235 0.35714 0.51724
FDR(False discovery rate) 0.4 0.5 0.4
FN(False negative/miss/type 2 error) 0 2 3
FNR(Miss rate or false negative rate) 0.0 0.66667 0.5
FOR(False omission rate) 0.0 0.2 0.42857
FP(False positive/type 1 error/false alarm) 2 1 2
FPR(Fall-out or false positive rate) 0.22222 0.11111 0.33333
G(G-measure geometric mean of precision and sensitivity) 0.7746 0.40825 0.54772
GI(Gini index) 0.77778 0.22222 0.16667
IS(Information score) 1.26303 1.0 0.26303
J(Jaccard index) 0.6 0.25 0.375
LS(Lift score) 2.4 2.0 1.2
MCC(Matthews correlation coefficient) 0.68313 0.2582 0.16903
MCEN(Modified confusion entropy) 0.26439 0.5 0.6875
MK(Markedness) 0.6 0.3 0.17143
N(Condition negative) 9 9 6
NLR(Negative likelihood ratio) 0.0 0.75 0.75
NPV(Negative predictive value) 1.0 0.8 0.57143
P(Condition positive or support) 3 3 6
PLR(Positive likelihood ratio) 4.5 3.0 1.5
PLRI(Positive likelihood ratio interpretation) Poor Poor Poor
POP(Population) 12 12 12
PPV(Precision or positive predictive value) 0.6 0.5 0.6
PRE(Prevalence) 0.25 0.25 0.5
RACC(Random accuracy) 0.10417 0.04167 0.20833
RACCU(Random accuracy unbiased) 0.11111 0.0434 0.21007
TN(True negative/correct rejection) 7 8 4
TNR(Specificity or true negative rate) 0.77778 0.88889 0.66667
TON(Test outcome negative) 7 10 7
TOP(Test outcome positive) 5 2 5
TP(True positive/hit) 3 1 3
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5
Y(Youden index) 0.77778 0.22222 0.16667
dInd(Distance index) 0.22222 0.67586 0.60093
sInd(Similarity index) 0.84287 0.52209 0.57508
<BLANKLINE>
>>> cm.Y["L2"]
0.2222222222222221
>>> cm_2 = ConfusionMatrix(y_actu, 2)
Traceback (most recent call last):
...
pycm.pycm_obj.pycmVectorError: The type of input vectors is assumed to be a list or a NumPy array
>>> cm_3 = ConfusionMatrix(y_actu, [1,2])
Traceback (most recent call last):
...
pycm.pycm_obj.pycmVectorError: Input vectors must have same length
>>> cm_4 = ConfusionMatrix([], [])
Traceback (most recent call last):
...
pycm.pycm_obj.pycmVectorError: Input vectors are empty
>>> cm_5 = ConfusionMatrix([1,1,1,], [1,1,1,1])
Traceback (most recent call last):
...
pycm.pycm_obj.pycmVectorError: Input vectors must have same length
>>> pycm_help()
<BLANKLINE>
PyCM is a multi-class confusion matrix library written in Python that
supports both input data vectors and direct matrix, and a proper tool for
post-classification model evaluation that supports most classes and overall
statistics parameters.
PyCM is the swiss-army knife of confusion matrices, targeted mainly at
data scientists that need a broad array of metrics for predictive models
and an accurate evaluation of large variety of classifiers.
<BLANKLINE>
Repo : https://github.com/sepandhaghighi/pycm
Webpage : http://www.pycm.ir
<BLANKLINE>
<BLANKLINE>
>>> RCI_calc(24,0)
'None'
>>> CBA_calc([1,2], {1:{1:0,2:0},2:{1:0,2:0}}, {1:0,2:0}, {1:0,2:0})
'None'
>>> RR_calc([], {1:0,2:0})
'None'
>>> overall_MCC_calc([1,2], {1:{1:0,2:0},2:{1:0,2:0}}, {1:0,2:0}, {1:0,2:0})
'None'
>>> CEN_misclassification_calc({1:{1:0,2:0},2:{1:0,2:0}},{1:0,2:0},{1:0,2:0},1,1,2)
'None'
>>> vector_check([1,2,3,0.4])
False
>>> vector_check([1,2,3,-2])
False
>>> matrix_check({1:{1:0.5,2:0},2:{1:0,2:0}})
False
>>> matrix_check([])
False
>>> TTPN_calc(0,0)
'None'
>>> TTPN_calc(1,4)
0.2
>>> FXR_calc(None)
'None'
>>> FXR_calc(0.2)
0.8
>>> ACC_calc(0,0,0,0)
'None'
>>> ACC_calc(1,1,3,4)
0.2222222222222222
>>> MCC_calc(0,2,0,2)
'None'
>>> MCC_calc(1,2,3,4)
-0.408248290463863
>>> LR_calc(1,2)
0.5
>>> LR_calc(1,0)
'None'
>>> MK_BM_calc(2,"None")
'None'
>>> MK_BM_calc(1,2)
2
>>> PRE_calc(None,2)
'None'
>>> PRE_calc(1,5)
0.2
>>> PRE_calc(1,0)
'None'
>>> G_calc(None,2)
'None'
>>> G_calc(1,2)
1.4142135623730951
>>> RACC_calc(2,3,4)
0.375
>>> reliability_calc(1,None)
'None'
>>> reliability_calc(2,0.3)
1.7
>>> micro_calc({1:2,2:3},{1:1,2:4})
0.5
>>> micro_calc({1:2,2:3},None)
'None'
>>> macro_calc(None)
'None'
>>> macro_calc({1:2,2:3})
2.5
>>> F_calc(TP=0,FP=0,FN=0,Beta=1)
'None'
>>> F_calc(TP=3,FP=2,FN=1,Beta=5)
0.7428571428571429
>>> save_stat=cm.save_stat("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered2",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=["L1","L2"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered3",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.pycm'"}
True
>>> ERR_calc(None)
'None'
>>> ERR_calc(0.1)
0.9
>>> cm.F_beta(4)["L1"]
0.9622641509433962
>>> cm.F_beta(4)["L2"]
0.34
>>> cm.F_beta(4)["L3"]
0.504950495049505
>>> import numpy as np
>>> y_test = np.array([600, 200, 200, 200, 200, 200, 200, 200, 500, 500, 500, 200, 200, 200, 200, 200, 200, 200, 200, 200])
>>> y_pred = np.array([100, 200, 200, 100, 100, 200, 200, 200, 100, 200, 500, 100, 100, 100, 100, 100, 100, 100, 500, 200])
>>> cm=ConfusionMatrix(y_test, y_pred)
>>> print(cm)
Predict 100 200 500 600
Actual
100 0 0 0 0
<BLANKLINE>
200 9 6 1 0
<BLANKLINE>
500 1 1 1 0
<BLANKLINE>
600 1 0 0 0
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.14096,0.55904)
AUNP None
AUNU None
Bennett S 0.13333
CBA 0.17708
Chi-Squared None
Chi-Squared DF 9
Conditional Entropy 1.23579
Cramer V None
Cross Entropy 1.70995
Gwet AC1 0.19505
Hamming Loss 0.65
Joint Entropy 2.11997
KL Divergence None
Kappa 0.07801
Kappa 95% CI (-0.2185,0.37453)
Kappa No Prevalence -0.3
Kappa Standard Error 0.15128
Kappa Unbiased -0.12554
Lambda A 0.0
Lambda B 0.0
Mutual Information 0.10088
NIR 0.8
Overall ACC 0.35
Overall CEN 0.3648
Overall J (0.60294,0.15074)
Overall MCC 0.12642
Overall MCEN 0.37463
Overall RACC 0.295
Overall RACCU 0.4225
P-Value 1.0
PPV Macro None
PPV Micro 0.35
Phi-Squared None
RCI 0.11409
RR 5.0
Reference Entropy 0.88418
Response Entropy 1.33667
SOA1(Landis & Koch) Slight
SOA2(Fleiss) Poor
SOA3(Altman) Poor
SOA4(Cicchetti) Poor
Scott PI -0.12554
Standard Error 0.10665
TPR Macro None
TPR Micro 0.35
Zero-one Loss 13
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100 200 500 600
ACC(Accuracy) 0.45 0.45 0.85 0.95
AUC(Area under the roc curve) None 0.5625 0.63725 0.5
AUCI(Auc value interpretation) None Poor Fair Poor
BM(Informedness or bookmaker informedness) None 0.125 0.27451 0.0
CEN(Confusion entropy) 0.33496 0.35708 0.53895 0.0
DOR(Diagnostic odds ratio) None 1.8 8.0 None
DP(Discriminant power) None 0.14074 0.4979 None
DPI(Discriminant power interpretation) None Poor Poor None
ERR(Error rate) 0.55 0.55 0.15 0.05
F0.5(F0.5 score) 0.0 0.68182 0.45455 0.0
F1(F1 score - harmonic mean of precision and sensitivity) 0.0 0.52174 0.4 0.0
F2(F2 score) 0.0 0.42254 0.35714 0.0
FDR(False discovery rate) 1.0 0.14286 0.5 None
FN(False negative/miss/type 2 error) 0 10 2 1
FNR(Miss rate or false negative rate) None 0.625 0.66667 1.0
FOR(False omission rate) 0.0 0.76923 0.11111 0.05
FP(False positive/type 1 error/false alarm) 11 1 1 0
FPR(Fall-out or false positive rate) 0.55 0.25 0.05882 0.0
G(G-measure geometric mean of precision and sensitivity) None 0.56695 0.40825 None
GI(Gini index) None 0.125 0.27451 0.0
IS(Information score) None 0.09954 1.73697 None
J(Jaccard index) 0.0 0.35294 0.25 0.0
LS(Lift score) None 1.07143 3.33333 None
MCC(Matthews correlation coefficient) None 0.10483 0.32673 None
MCEN(Modified confusion entropy) 0.33496 0.37394 0.58028 0.0
MK(Markedness) 0.0 0.08791 0.38889 None
N(Condition negative) 20 4 17 19
NLR(Negative likelihood ratio) None 0.83333 0.70833 1.0
NPV(Negative predictive value) 1.0 0.23077 0.88889 0.95
P(Condition positive or support) 0 16 3 1
PLR(Positive likelihood ratio) None 1.5 5.66667 None
PLRI(Positive likelihood ratio interpretation) None Poor Fair None
POP(Population) 20 20 20 20
PPV(Precision or positive predictive value) 0.0 0.85714 0.5 None
PRE(Prevalence) 0.0 0.8 0.15 0.05
RACC(Random accuracy) 0.0 0.28 0.015 0.0
RACCU(Random accuracy unbiased) 0.07563 0.33062 0.01562 0.00063
TN(True negative/correct rejection) 9 3 16 19
TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0
TON(Test outcome negative) 9 13 18 20
TOP(Test outcome positive) 11 7 2 0
TP(True positive/hit) 0 6 1 0
TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0
Y(Youden index) None 0.125 0.27451 0.0
dInd(Distance index) None 0.67315 0.66926 1.0
sInd(Similarity index) None 0.52401 0.52676 0.29289
<BLANKLINE>
>>> cm.stat()
Overall Statistics :
<BLANKLINE>
95% CI (0.14096,0.55904)
AUNP None
AUNU None
Bennett S 0.13333
CBA 0.17708
Chi-Squared None
Chi-Squared DF 9
Conditional Entropy 1.23579
Cramer V None
Cross Entropy 1.70995
Gwet AC1 0.19505
Hamming Loss 0.65
Joint Entropy 2.11997
KL Divergence None
Kappa 0.07801
Kappa 95% CI (-0.2185,0.37453)
Kappa No Prevalence -0.3
Kappa Standard Error 0.15128
Kappa Unbiased -0.12554
Lambda A 0.0
Lambda B 0.0
Mutual Information 0.10088
NIR 0.8
Overall ACC 0.35
Overall CEN 0.3648
Overall J (0.60294,0.15074)
Overall MCC 0.12642
Overall MCEN 0.37463
Overall RACC 0.295
Overall RACCU 0.4225
P-Value 1.0
PPV Macro None
PPV Micro 0.35
Phi-Squared None
RCI 0.11409
RR 5.0
Reference Entropy 0.88418
Response Entropy 1.33667
SOA1(Landis & Koch) Slight
SOA2(Fleiss) Poor
SOA3(Altman) Poor
SOA4(Cicchetti) Poor
Scott PI -0.12554
Standard Error 0.10665
TPR Macro None
TPR Micro 0.35
Zero-one Loss 13
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100 200 500 600
ACC(Accuracy) 0.45 0.45 0.85 0.95
AUC(Area under the roc curve) None 0.5625 0.63725 0.5
AUCI(Auc value interpretation) None Poor Fair Poor
BM(Informedness or bookmaker informedness) None 0.125 0.27451 0.0
CEN(Confusion entropy) 0.33496 0.35708 0.53895 0.0
DOR(Diagnostic odds ratio) None 1.8 8.0 None
DP(Discriminant power) None 0.14074 0.4979 None
DPI(Discriminant power interpretation) None Poor Poor None
ERR(Error rate) 0.55 0.55 0.15 0.05
F0.5(F0.5 score) 0.0 0.68182 0.45455 0.0
F1(F1 score - harmonic mean of precision and sensitivity) 0.0 0.52174 0.4 0.0
F2(F2 score) 0.0 0.42254 0.35714 0.0
FDR(False discovery rate) 1.0 0.14286 0.5 None
FN(False negative/miss/type 2 error) 0 10 2 1
FNR(Miss rate or false negative rate) None 0.625 0.66667 1.0
FOR(False omission rate) 0.0 0.76923 0.11111 0.05
FP(False positive/type 1 error/false alarm) 11 1 1 0
FPR(Fall-out or false positive rate) 0.55 0.25 0.05882 0.0
G(G-measure geometric mean of precision and sensitivity) None 0.56695 0.40825 None
GI(Gini index) None 0.125 0.27451 0.0
IS(Information score) None 0.09954 1.73697 None
J(Jaccard index) 0.0 0.35294 0.25 0.0
LS(Lift score) None 1.07143 3.33333 None
MCC(Matthews correlation coefficient) None 0.10483 0.32673 None
MCEN(Modified confusion entropy) 0.33496 0.37394 0.58028 0.0
MK(Markedness) 0.0 0.08791 0.38889 None
N(Condition negative) 20 4 17 19
NLR(Negative likelihood ratio) None 0.83333 0.70833 1.0
NPV(Negative predictive value) 1.0 0.23077 0.88889 0.95
P(Condition positive or support) 0 16 3 1
PLR(Positive likelihood ratio) None 1.5 5.66667 None
PLRI(Positive likelihood ratio interpretation) None Poor Fair None
POP(Population) 20 20 20 20
PPV(Precision or positive predictive value) 0.0 0.85714 0.5 None
PRE(Prevalence) 0.0 0.8 0.15 0.05
RACC(Random accuracy) 0.0 0.28 0.015 0.0
RACCU(Random accuracy unbiased) 0.07563 0.33062 0.01562 0.00063
TN(True negative/correct rejection) 9 3 16 19
TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0
TON(Test outcome negative) 9 13 18 20
TOP(Test outcome positive) 11 7 2 0
TP(True positive/hit) 0 6 1 0
TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0
Y(Youden index) None 0.125 0.27451 0.0
dInd(Distance index) None 0.67315 0.66926 1.0
sInd(Similarity index) None 0.52401 0.52676 0.29289
<BLANKLINE>
>>> cm.stat(overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"])
Overall Statistics :
<BLANKLINE>
Kappa 0.07801
Scott PI -0.12554
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100 200 500 600
ACC(Accuracy) 0.45 0.45 0.85 0.95
AUC(Area under the roc curve) None 0.5625 0.63725 0.5
TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0
TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0
<BLANKLINE>
>>> cm.stat(overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
Overall Statistics :
<BLANKLINE>
Kappa 0.07801
Scott PI -0.12554
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100
ACC(Accuracy) 0.45
AUC(Area under the roc curve) None
TNR(Specificity or true negative rate) 0.45
TPR(Sensitivity, recall, hit rate, or true positive rate) None
<BLANKLINE>
>>> cm.stat(overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[])
Overall Statistics :
<BLANKLINE>
Kappa 0.07801
Scott PI -0.12554
<BLANKLINE>
>>> cm.stat(overall_param=["Kappa","Scott PI"],class_param=[],class_name=[100])
Overall Statistics :
<BLANKLINE>
Kappa 0.07801
Scott PI -0.12554
<BLANKLINE>
>>> cm.stat(overall_param=["Kappa","Scott PI"],class_param=["TPR"],class_name=[100])
Overall Statistics :
<BLANKLINE>
Kappa 0.07801
Scott PI -0.12554
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100
TPR(Sensitivity, recall, hit rate, or true positive rate) None
<BLANKLINE>
>>> cm.stat(overall_param=[],class_param=["TPR"],class_name=[100])
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100
TPR(Sensitivity, recall, hit rate, or true positive rate) None
<BLANKLINE>
>>> cm.print_normalized_matrix()
Predict 100 200 500 600
Actual
100 0.0 0.0 0.0 0.0
200 0.5625 0.375 0.0625 0.0
500 0.33333 0.33333 0.33333 0.0
600 1.0 0.0 0.0 0.0
<BLANKLINE>
>>> cm.print_matrix()
Predict 100 200 500 600
Actual
100 0 0 0 0
200 9 6 1 0
500 1 1 1 0
600 1 0 0 0
<BLANKLINE>
>>> cm.print_matrix(one_vs_all=True,class_name=200)
Predict 200 ~
Actual
200 6 10
~ 1 3
<BLANKLINE>
>>> cm.print_normalized_matrix(one_vs_all=True,class_name=200)
Predict 200 ~
Actual
200 0.375 0.625
~ 0.25 0.75
<BLANKLINE>
>>> kappa_analysis_koch(-0.1)
'Poor'
>>> kappa_analysis_koch(0)
'Slight'
>>> kappa_analysis_koch(0.2)
'Fair'
>>> kappa_analysis_koch(0.4)
'Moderate'
>>> kappa_analysis_koch(0.6)
'Substantial'
>>> kappa_analysis_koch(0.8)
'Almost Perfect'
>>> kappa_analysis_koch(1.2)
'None'
>>> kappa_analysis_fleiss(0.4)
'Intermediate to Good'
>>> kappa_analysis_fleiss(0.75)
'Excellent'
>>> kappa_analysis_fleiss(1.2)
'Excellent'
>>> kappa_analysis_altman(-0.2)
'Poor'
>>> kappa_analysis_altman(0.2)
'Fair'
>>> kappa_analysis_altman(0.4)
'Moderate'
>>> kappa_analysis_altman(0.6)
'Good'
>>> kappa_analysis_altman(0.8)
'Very Good'
>>> kappa_analysis_altman(1.2)
'None'
>>> kappa_analysis_fleiss(0.2)
'Poor'
>>> kappa_analysis_cicchetti(0.3)
'Poor'
>>> kappa_analysis_cicchetti(0.5)
'Fair'
>>> kappa_analysis_cicchetti(0.65)
'Good'
>>> kappa_analysis_cicchetti(0.8)
'Excellent'
>>> PLR_analysis(1)
'Negligible'
>>> PLR_analysis(3)
'Poor'
>>> PLR_analysis(7)
'Fair'
>>> PLR_analysis(11)
'Good'
>>> DP_analysis(0.2)
'Poor'
>>> DP_analysis(1.5)
'Limited'
>>> DP_analysis(2.5)
'Fair'
>>> DP_analysis(10)
'Good'
>>> AUC_analysis(0.5)
'Poor'
>>> AUC_analysis(0.65)
'Fair'
>>> AUC_analysis(0.75)
'Good'
>>> AUC_analysis(0.86)
'Very Good'
>>> AUC_analysis(0.97)
'Excellent'
>>> AUC_analysis(1.0)
'Excellent'
>>> PC_PI_calc(1,1,1)
'None'
>>> PC_PI_calc({1:12},{1:6},{1:45})
0.04000000000000001
>>> PC_AC1_calc(1,1,1)
'None'
>>> PC_AC1_calc({1:123,2:2},{1:120,2:5},{1:125,2:125})
0.05443200000000002
>>> y_act=[0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2]
>>> y_pre=[0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,1,1,1,1,1,2,0,1,2,2,2,2]
>>> cm2=ConfusionMatrix(y_act,y_pre)
>>> chi_squared=chi_square_calc(cm2.classes,cm2.table,cm2.TOP,cm2.P,cm2.POP)
>>> chi_squared
15.525641025641026
>>> population = list(cm2.POP.values())[0]
>>> phi_squared=phi_square_calc(chi_squared,population)
>>> phi_squared
0.5750237416904084
>>> V=cramers_V_calc(phi_squared,cm2.classes)
>>> V
0.5362013342441477
>>> DF=DF_calc(cm2.classes)
>>> DF
4
>>> SE=se_calc(cm2.Overall_ACC,population)
>>> SE
0.09072184232530289
>>> CI=CI_calc(cm2.Overall_ACC,SE)
>>> CI
(0.48885185570907297, 0.8444814776242603)
>>> response_entropy=entropy_calc(cm2.TOP,cm2.POP)
>>> response_entropy
1.486565953154142
>>> reference_entropy=entropy_calc(cm2.P,cm2.POP)
>>> reference_entropy
1.5304930567574824
>>> cross_entropy = cross_entropy_calc(cm2.TOP,cm2.P,cm2.POP)
>>> cross_entropy
1.5376219392005763
>>> join_entropy = joint_entropy_calc(cm2.classes,cm2.table,cm2.POP)
>>> join_entropy
2.619748965432189
>>> conditional_entropy = conditional_entropy_calc(cm2.classes,cm2.table,cm2.P,cm2.POP)
>>> conditional_entropy
1.089255908674706
>>> kl_divergence=kl_divergence_calc(cm2.P,cm2.TOP,cm2.POP)
>>> kl_divergence
0.007128882443093773
>>> lambda_B=lambda_B_calc(cm2.classes,cm2.table,cm2.TOP,population)
>>> lambda_B
0.35714285714285715
>>> lambda_A=lambda_A_calc(cm2.classes,cm2.table,cm2.P,population)
>>> lambda_A
0.4
>>> IS_calc(13,0,0,38)
1.5474877953024933
>>> kappa_no_prevalence_calc(cm2.Overall_ACC)
0.33333333333333326
>>> reliability_calc(cm2.Overall_RACC,cm2.Overall_ACC)
0.4740259740259741
>>> mutual_information_calc(cm2.ResponseEntropy,cm2.ConditionalEntropy)
0.39731004447943596
>>> cm3=ConfusionMatrix(matrix=cm2.table)
>>> cm3
pycm.ConfusionMatrix(classes: [0, 1, 2])
>>> cm3.CI
(0.48885185570907297, 0.8444814776242603)
>>> cm3.Chi_Squared
15.525641025641026
>>> cm3.Phi_Squared
0.5750237416904084
>>> cm3.V
0.5362013342441477
>>> cm3.DF
4
>>> cm3.ResponseEntropy
1.486565953154142
>>> cm3.ReferenceEntropy
1.5304930567574824
>>> cm3.CrossEntropy
1.5376219392005763
>>> cm3.JointEntropy
2.619748965432189
>>> cm3.ConditionalEntropy
1.089255908674706
>>> cm3.KL
0.007128882443093773
>>> cm3.LambdaA
0.4
>>> cm3.LambdaB
0.35714285714285715
>>> cm3=ConfusionMatrix(matrix={})
Traceback (most recent call last):
...
pycm.pycm_obj.pycmMatrixError: Input confusion matrix format error
>>> cm_4=ConfusionMatrix(matrix={1:{1:2,"1":2},"1":{1:2,"1":3}})
Traceback (most recent call last):
...
pycm.pycm_obj.pycmMatrixError: Type of the input matrix classes is assumed be the same
>>> cm_5=ConfusionMatrix(matrix={1:{1:2}})
Traceback (most recent call last):
...
pycm.pycm_obj.pycmVectorError: Number of the classes is lower than 2
>>> save_stat=cm.save_html("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered2",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered3",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered4",address=False,overall_param=["Kappa","Scott PI"],class_param=[],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered5",address=False,overall_param=[],class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_colored",address=False,color=(130,100,200))
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered",address=False,class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered2",address=False,class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered3",address=False,class_param=["TPR","TNR","ACC","AUC"],class_name=[])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered4",address=False,class_param=[],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.html'"}
True
>>> save_stat=cm.save_csv("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.csv'"}
True
>>> def activation(i):
... if i<0.7:
... return 1
... else:
... return 0
>>> cm_6 = ConfusionMatrix([0,0,1,0],[0.87,0.34,0.9,0.12],threshold=activation)
>>> cm_6.print_matrix()
Predict 0 1
Actual
0 1 2
1 1 0
>>> save_obj=cm.save_obj("test",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file=ConfusionMatrix(file=open("test.obj","r"))
>>> print(cm_file)
Predict 100 200 500 600
Actual
100 0 0 0 0
<BLANKLINE>
200 9 6 1 0
<BLANKLINE>
500 1 1 1 0
<BLANKLINE>
600 1 0 0 0
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.14096,0.55904)
AUNP None
AUNU None
Bennett S 0.13333
CBA 0.17708
Chi-Squared None
Chi-Squared DF 9
Conditional Entropy 1.23579
Cramer V None
Cross Entropy 1.70995
Gwet AC1 0.19505
Hamming Loss 0.65
Joint Entropy 2.11997
KL Divergence None
Kappa 0.07801
Kappa 95% CI (-0.2185,0.37453)
Kappa No Prevalence -0.3
Kappa Standard Error 0.15128
Kappa Unbiased -0.12554
Lambda A 0.0
Lambda B 0.0
Mutual Information 0.10088
NIR 0.8
Overall ACC 0.35
Overall CEN 0.3648
Overall J (0.60294,0.15074)
Overall MCC 0.12642
Overall MCEN 0.37463
Overall RACC 0.295
Overall RACCU 0.4225
P-Value 1.0
PPV Macro None
PPV Micro 0.35
Phi-Squared None
RCI 0.11409
RR 5.0
Reference Entropy 0.88418
Response Entropy 1.33667
SOA1(Landis & Koch) Slight
SOA2(Fleiss) Poor
SOA3(Altman) Poor
SOA4(Cicchetti) Poor
Scott PI -0.12554
Standard Error 0.10665
TPR Macro None
TPR Micro 0.35
Zero-one Loss 13
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100 200 500 600
ACC(Accuracy) 0.45 0.45 0.85 0.95
AUC(Area under the roc curve) None 0.5625 0.63725 0.5
AUCI(Auc value interpretation) None Poor Fair Poor
BM(Informedness or bookmaker informedness) None 0.125 0.27451 0.0
CEN(Confusion entropy) 0.33496 0.35708 0.53895 0.0
DOR(Diagnostic odds ratio) None 1.8 8.0 None
DP(Discriminant power) None 0.14074 0.4979 None
DPI(Discriminant power interpretation) None Poor Poor None
ERR(Error rate) 0.55 0.55 0.15 0.05
F0.5(F0.5 score) 0.0 0.68182 0.45455 0.0
F1(F1 score - harmonic mean of precision and sensitivity) 0.0 0.52174 0.4 0.0
F2(F2 score) 0.0 0.42254 0.35714 0.0
FDR(False discovery rate) 1.0 0.14286 0.5 None
FN(False negative/miss/type 2 error) 0 10 2 1
FNR(Miss rate or false negative rate) None 0.625 0.66667 1.0
FOR(False omission rate) 0.0 0.76923 0.11111 0.05
FP(False positive/type 1 error/false alarm) 11 1 1 0
FPR(Fall-out or false positive rate) 0.55 0.25 0.05882 0.0
G(G-measure geometric mean of precision and sensitivity) None 0.56695 0.40825 None
GI(Gini index) None 0.125 0.27451 0.0
IS(Information score) None 0.09954 1.73697 None
J(Jaccard index) 0.0 0.35294 0.25 0.0
LS(Lift score) None 1.07143 3.33333 None
MCC(Matthews correlation coefficient) None 0.10483 0.32673 None
MCEN(Modified confusion entropy) 0.33496 0.37394 0.58028 0.0
MK(Markedness) 0.0 0.08791 0.38889 None
N(Condition negative) 20 4 17 19
NLR(Negative likelihood ratio) None 0.83333 0.70833 1.0
NPV(Negative predictive value) 1.0 0.23077 0.88889 0.95
P(Condition positive or support) 0 16 3 1
PLR(Positive likelihood ratio) None 1.5 5.66667 None
PLRI(Positive likelihood ratio interpretation) None Poor Fair None
POP(Population) 20 20 20 20
PPV(Precision or positive predictive value) 0.0 0.85714 0.5 None
PRE(Prevalence) 0.0 0.8 0.15 0.05
RACC(Random accuracy) 0.0 0.28 0.015 0.0
RACCU(Random accuracy unbiased) 0.07563 0.33062 0.01562 0.00063
TN(True negative/correct rejection) 9 3 16 19
TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0
TON(Test outcome negative) 9 13 18 20
TOP(Test outcome positive) 11 7 2 0
TP(True positive/hit) 0 6 1 0
TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0
Y(Youden index) None 0.125 0.27451 0.0
dInd(Distance index) None 0.67315 0.66926 1.0
sInd(Similarity index) None 0.52401 0.52676 0.29289
<BLANKLINE>
>>> save_obj=cm_6.save_obj("test2",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file_2=ConfusionMatrix(file=open("test2.obj","r"))
>>> cm_file_2.print_matrix()
Predict 0 1
Actual
0 1 2
1 1 0
>>> cm = ConfusionMatrix(matrix={"Class1":{"Class1":9,"Class2":3,"Class3":0},"Class2":{"Class1":3,"Class2":5,"Class3":1},"Class3":{"Class1":1,"Class2":1,"Class3":4}})
>>> print(cm)
Predict Class1 Class2 Class3
Actual
Class1 9 3 0
<BLANKLINE>
Class2 3 5 1
<BLANKLINE>
Class3 1 1 4
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.48885,0.84448)
AUNP 0.73175
AUNU 0.73929
Bennett S 0.5
CBA 0.63818
Chi-Squared 15.52564
Chi-Squared DF 4
Conditional Entropy 1.08926
Cramer V 0.5362
Cross Entropy 1.53762
Gwet AC1 0.51229
Hamming Loss 0.33333
Joint Entropy 2.61975
KL Divergence 0.00713
Kappa 0.47403
Kappa 95% CI (0.19345,0.7546)
Kappa No Prevalence 0.33333
Kappa Standard Error 0.14315
Kappa Unbiased 0.47346
Lambda A 0.4
Lambda B 0.35714
Mutual Information 0.39731
NIR 0.44444
Overall ACC 0.66667
Overall CEN 0.52986
Overall J (1.51854,0.50618)
Overall MCC 0.47511
Overall MCEN 0.65286
Overall RACC 0.36626
Overall RACCU 0.36694
P-Value 0.01667
PPV Macro 0.68262
PPV Micro 0.66667
Phi-Squared 0.57502
RCI 0.2596
RR 9.0
Reference Entropy 1.53049
Response Entropy 1.48657
SOA1(Landis & Koch) Moderate
SOA2(Fleiss) Intermediate to Good
SOA3(Altman) Moderate
SOA4(Cicchetti) Fair
Scott PI 0.47346
Standard Error 0.09072
TPR Macro 0.65741
TPR Micro 0.66667
Zero-one Loss 9
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes Class1 Class2 Class3
ACC(Accuracy) 0.74074 0.7037 0.88889
AUC(Area under the roc curve) 0.74167 0.66667 0.80952
AUCI(Auc value interpretation) Good Fair Very Good
BM(Informedness or bookmaker informedness) 0.48333 0.33333 0.61905
CEN(Confusion entropy) 0.45994 0.66249 0.47174
DOR(Diagnostic odds ratio) 8.25 4.375 40.0
DP(Discriminant power) 0.50527 0.35339 0.88326
DPI(Discriminant power interpretation) Poor Poor Poor
ERR(Error rate) 0.25926 0.2963 0.11111
F0.5(F0.5 score) 0.70312 0.55556 0.76923
F1(F1 score - harmonic mean of precision and sensitivity) 0.72 0.55556 0.72727
F2(F2 score) 0.7377 0.55556 0.68966
FDR(False discovery rate) 0.30769 0.44444 0.2
FN(False negative/miss/type 2 error) 3 4 2
FNR(Miss rate or false negative rate) 0.25 0.44444 0.33333
FOR(False omission rate) 0.21429 0.22222 0.09091
FP(False positive/type 1 error/false alarm) 4 4 1
FPR(Fall-out or false positive rate) 0.26667 0.22222 0.04762
G(G-measure geometric mean of precision and sensitivity) 0.72058 0.55556 0.7303
GI(Gini index) 0.48333 0.33333 0.61905
IS(Information score) 0.63941 0.73697 1.848
J(Jaccard index) 0.5625 0.38462 0.57143
LS(Lift score) 1.55769 1.66667 3.6
MCC(Matthews correlation coefficient) 0.48067 0.33333 0.66254
MCEN(Modified confusion entropy) 0.57782 0.77284 0.60158
MK(Markedness) 0.47802 0.33333 0.70909
N(Condition negative) 15 18 21
NLR(Negative likelihood ratio) 0.34091 0.57143 0.35
NPV(Negative predictive value) 0.78571 0.77778 0.90909
P(Condition positive or support) 12 9 6
PLR(Positive likelihood ratio) 2.8125 2.5 14.0
PLRI(Positive likelihood ratio interpretation) Poor Poor Good
POP(Population) 27 27 27
PPV(Precision or positive predictive value) 0.69231 0.55556 0.8
PRE(Prevalence) 0.44444 0.33333 0.22222
RACC(Random accuracy) 0.21399 0.11111 0.04115
RACCU(Random accuracy unbiased) 0.21433 0.11111 0.0415
TN(True negative/correct rejection) 11 14 20
TNR(Specificity or true negative rate) 0.73333 0.77778 0.95238
TON(Test outcome negative) 14 18 22
TOP(Test outcome positive) 13 9 5
TP(True positive/hit) 9 5 4
TPR(Sensitivity, recall, hit rate, or true positive rate) 0.75 0.55556 0.66667
Y(Youden index) 0.48333 0.33333 0.61905
dInd(Distance index) 0.36553 0.4969 0.33672
sInd(Similarity index) 0.74153 0.64864 0.7619
<BLANKLINE>
>>> cm = ConfusionMatrix(matrix={"Class1":{"Class1":9,"Class2":3,"Class3":1},"Class2":{"Class1":3,"Class2":5,"Class3":1},"Class3":{"Class1":0,"Class2":1,"Class3":4}},transpose=True)
>>> print(cm)
Predict Class1 Class2 Class3
Actual
Class1 9 3 0
<BLANKLINE>
Class2 3 5 1
<BLANKLINE>
Class3 1 1 4
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.48885,0.84448)
AUNP 0.73175
AUNU 0.73929
Bennett S 0.5
CBA 0.63818
Chi-Squared 15.52564
Chi-Squared DF 4
Conditional Entropy 1.08926
Cramer V 0.5362
Cross Entropy 1.53762
Gwet AC1 0.51229
Hamming Loss 0.33333
Joint Entropy 2.61975
KL Divergence 0.00713
Kappa 0.47403
Kappa 95% CI (0.19345,0.7546)
Kappa No Prevalence 0.33333
Kappa Standard Error 0.14315
Kappa Unbiased 0.47346
Lambda A 0.4
Lambda B 0.35714
Mutual Information 0.39731
NIR 0.44444
Overall ACC 0.66667
Overall CEN 0.52986
Overall J (1.51854,0.50618)
Overall MCC 0.47511
Overall MCEN 0.65286
Overall RACC 0.36626
Overall RACCU 0.36694
P-Value 0.01667
PPV Macro 0.68262
PPV Micro 0.66667
Phi-Squared 0.57502
RCI 0.2596
RR 9.0
Reference Entropy 1.53049
Response Entropy 1.48657
SOA1(Landis & Koch) Moderate
SOA2(Fleiss) Intermediate to Good
SOA3(Altman) Moderate
SOA4(Cicchetti) Fair
Scott PI 0.47346
Standard Error 0.09072
TPR Macro 0.65741
TPR Micro 0.66667
Zero-one Loss 9
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes Class1 Class2 Class3
ACC(Accuracy) 0.74074 0.7037 0.88889
AUC(Area under the roc curve) 0.74167 0.66667 0.80952
AUCI(Auc value interpretation) Good Fair Very Good
BM(Informedness or bookmaker informedness) 0.48333 0.33333 0.61905
CEN(Confusion entropy) 0.45994 0.66249 0.47174
DOR(Diagnostic odds ratio) 8.25 4.375 40.0
DP(Discriminant power) 0.50527 0.35339 0.88326
DPI(Discriminant power interpretation) Poor Poor Poor
ERR(Error rate) 0.25926 0.2963 0.11111
F0.5(F0.5 score) 0.70312 0.55556 0.76923
F1(F1 score - harmonic mean of precision and sensitivity) 0.72 0.55556 0.72727
F2(F2 score) 0.7377 0.55556 0.68966
FDR(False discovery rate) 0.30769 0.44444 0.2
FN(False negative/miss/type 2 error) 3 4 2
FNR(Miss rate or false negative rate) 0.25 0.44444 0.33333
FOR(False omission rate) 0.21429 0.22222 0.09091
FP(False positive/type 1 error/false alarm) 4 4 1
FPR(Fall-out or false positive rate) 0.26667 0.22222 0.04762
G(G-measure geometric mean of precision and sensitivity) 0.72058 0.55556 0.7303
GI(Gini index) 0.48333 0.33333 0.61905
IS(Information score) 0.63941 0.73697 1.848
J(Jaccard index) 0.5625 0.38462 0.57143
LS(Lift score) 1.55769 1.66667 3.6
MCC(Matthews correlation coefficient) 0.48067 0.33333 0.66254
MCEN(Modified confusion entropy) 0.57782 0.77284 0.60158
MK(Markedness) 0.47802 0.33333 0.70909
N(Condition negative) 15 18 21
NLR(Negative likelihood ratio) 0.34091 0.57143 0.35
NPV(Negative predictive value) 0.78571 0.77778 0.90909
P(Condition positive or support) 12 9 6
PLR(Positive likelihood ratio) 2.8125 2.5 14.0
PLRI(Positive likelihood ratio interpretation) Poor Poor Good
POP(Population) 27 27 27
PPV(Precision or positive predictive value) 0.69231 0.55556 0.8
PRE(Prevalence) 0.44444 0.33333 0.22222
RACC(Random accuracy) 0.21399 0.11111 0.04115
RACCU(Random accuracy unbiased) 0.21433 0.11111 0.0415
TN(True negative/correct rejection) 11 14 20
TNR(Specificity or true negative rate) 0.73333 0.77778 0.95238
TON(Test outcome negative) 14 18 22
TOP(Test outcome positive) 13 9 5
TP(True positive/hit) 9 5 4
TPR(Sensitivity, recall, hit rate, or true positive rate) 0.75 0.55556 0.66667
Y(Youden index) 0.48333 0.33333 0.61905
dInd(Distance index) 0.36553 0.4969 0.33672
sInd(Similarity index) 0.74153 0.64864 0.7619
<BLANKLINE>
>>> online_help(param=None)
Please choose one parameter :
<BLANKLINE>
Example : online_help("J") or online_help(2)
<BLANKLINE>
1-95% CI
2-ACC
3-AUC
4-AUCI
5-AUNP
6-AUNU
7-BM
8-Bennett S
9-CBA
10-CEN
11-Chi-Squared
12-Chi-Squared DF
13-Conditional Entropy
14-Cramer V
15-Cross Entropy
16-DOR
17-DP
18-DPI
19-ERR
20-F0.5
21-F1
22-F2
23-FDR
24-FN
25-FNR
26-FOR
27-FP
28-FPR
29-G
30-GI
31-Gwet AC1
32-Hamming Loss
33-IS
34-J
35-Joint Entropy
36-KL Divergence
37-Kappa
38-Kappa 95% CI
39-Kappa No Prevalence
40-Kappa Standard Error
41-Kappa Unbiased
42-LS
43-Lambda A
44-Lambda B
45-MCC
46-MCEN
47-MK
48-Mutual Information
49-N
50-NIR
51-NLR
52-NPV
53-Overall ACC
54-Overall CEN
55-Overall J
56-Overall MCC
57-Overall MCEN
58-Overall RACC
59-Overall RACCU
60-P
61-P-Value
62-PLR
63-PLRI
64-POP
65-PPV
66-PPV Macro
67-PPV Micro
68-PRE
69-Phi-Squared
70-RACC
71-RACCU
72-RCI
73-RR
74-Reference Entropy
75-Response Entropy
76-SOA1(Landis & Koch)
77-SOA2(Fleiss)
78-SOA3(Altman)
79-SOA4(Cicchetti)
80-Scott PI
81-Standard Error
82-TN
83-TNR
84-TON
85-TOP
86-TP
87-TPR
88-TPR Macro
89-TPR Micro
90-Y
91-Zero-one Loss
92-dInd
93-sInd
>>> online_help("J")
...
>>> online_help(4)
...
>>> y_actu = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]
>>> y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]
>>> cm = ConfusionMatrix(y_actu, y_pred, sample_weight=[2, 2, 2, 2, 3, 1, 1, 2, 2, 1, 1, 2])
>>> print(cm)
Predict 0 1 2
Actual
0 6 0 0
<BLANKLINE>
1 0 1 2
<BLANKLINE>
2 4 2 6
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.41134,0.82675)
AUNP 0.7
AUNU 0.70556
Bennett S 0.42857
CBA 0.47778
Chi-Squared 10.44167
Chi-Squared DF 4
Conditional Entropy 0.96498
Cramer V 0.49861
Cross Entropy 1.50249
Gwet AC1 0.45277
Hamming Loss 0.38095
Joint Entropy 2.34377
KL Divergence 0.1237
Kappa 0.3913
Kappa 95% CI (0.05943,0.72318)
Kappa No Prevalence 0.2381
Kappa Standard Error 0.16932
Kappa Unbiased 0.37313
Lambda A 0.22222
Lambda B 0.36364
Mutual Information 0.47618
NIR 0.57143
Overall ACC 0.61905
Overall CEN 0.43947
Overall J (1.22857,0.40952)
Overall MCC 0.41558
Overall MCEN 0.50059
Overall RACC 0.37415
Overall RACCU 0.39229
P-Value 0.41709
PPV Macro 0.56111
PPV Micro 0.61905
Phi-Squared 0.49722
RCI 0.34536
RR 7.0
Reference Entropy 1.37878
Response Entropy 1.44117
SOA1(Landis & Koch) Fair
SOA2(Fleiss) Poor
SOA3(Altman) Fair
SOA4(Cicchetti) Poor
Scott PI 0.37313
Standard Error 0.10597
TPR Macro 0.61111
TPR Micro 0.61905
Zero-one Loss 8
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 0 1 2
ACC(Accuracy) 0.80952 0.80952 0.61905
AUC(Area under the roc curve) 0.86667 0.61111 0.63889
AUCI(Auc value interpretation) Very Good Fair Fair
BM(Informedness or bookmaker informedness) 0.73333 0.22222 0.27778
CEN(Confusion entropy) 0.25 0.52832 0.56439
DOR(Diagnostic odds ratio) None 4.0 3.5
DP(Discriminant power) None 0.33193 0.29996
DPI(Discriminant power interpretation) None Poor Poor
ERR(Error rate) 0.19048 0.19048 0.38095
F0.5(F0.5 score) 0.65217 0.33333 0.68182
F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.33333 0.6
F2(F2 score) 0.88235 0.33333 0.53571
FDR(False discovery rate) 0.4 0.66667 0.25
FN(False negative/miss/type 2 error) 0 2 6
FNR(Miss rate or false negative rate) 0.0 0.66667 0.5
FOR(False omission rate) 0.0 0.11111 0.46154
FP(False positive/type 1 error/false alarm) 4 2 2
FPR(Fall-out or false positive rate) 0.26667 0.11111 0.22222
G(G-measure geometric mean of precision and sensitivity) 0.7746 0.33333 0.61237
GI(Gini index) 0.73333 0.22222 0.27778
IS(Information score) 1.07039 1.22239 0.39232
J(Jaccard index) 0.6 0.2 0.42857
LS(Lift score) 2.1 2.33333 1.3125
MCC(Matthews correlation coefficient) 0.66332 0.22222 0.28307
MCEN(Modified confusion entropy) 0.26439 0.52877 0.65924
MK(Markedness) 0.6 0.22222 0.28846
N(Condition negative) 15 18 9
NLR(Negative likelihood ratio) 0.0 0.75 0.64286
NPV(Negative predictive value) 1.0 0.88889 0.53846
P(Condition positive or support) 6 3 12
PLR(Positive likelihood ratio) 3.75 3.0 2.25
PLRI(Positive likelihood ratio interpretation) Poor Poor Poor
POP(Population) 21 21 21
PPV(Precision or positive predictive value) 0.6 0.33333 0.75
PRE(Prevalence) 0.28571 0.14286 0.57143
RACC(Random accuracy) 0.13605 0.02041 0.21769
RACCU(Random accuracy unbiased) 0.14512 0.02041 0.22676
TN(True negative/correct rejection) 11 16 7
TNR(Specificity or true negative rate) 0.73333 0.88889 0.77778
TON(Test outcome negative) 11 18 13
TOP(Test outcome positive) 10 3 8
TP(True positive/hit) 6 1 6
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5
Y(Youden index) 0.73333 0.22222 0.27778
dInd(Distance index) 0.26667 0.67586 0.54716
sInd(Similarity index) 0.81144 0.52209 0.6131
<BLANKLINE>
>>> save_obj=cm.save_obj("test3",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file_3=ConfusionMatrix(file=open("test3.obj","r"))
>>> cm_file_3.print_matrix()
Predict 0 1 2
Actual
0 6 0 0
1 0 1 2
2 4 2 6
<BLANKLINE>
>>> cm_file_3.stat()
Overall Statistics :
<BLANKLINE>
95% CI (0.41134,0.82675)
AUNP 0.7
AUNU 0.70556
Bennett S 0.42857
CBA 0.47778
Chi-Squared 10.44167
Chi-Squared DF 4
Conditional Entropy 0.96498
Cramer V 0.49861
Cross Entropy 1.50249
Gwet AC1 0.45277
Hamming Loss 0.38095
Joint Entropy 2.34377
KL Divergence 0.1237
Kappa 0.3913
Kappa 95% CI (0.05943,0.72318)
Kappa No Prevalence 0.2381
Kappa Standard Error 0.16932
Kappa Unbiased 0.37313
Lambda A 0.22222
Lambda B 0.36364
Mutual Information 0.47618
NIR 0.57143
Overall ACC 0.61905
Overall CEN 0.43947
Overall J (1.22857,0.40952)
Overall MCC 0.41558
Overall MCEN 0.50059
Overall RACC 0.37415
Overall RACCU 0.39229
P-Value 0.41709
PPV Macro 0.56111
PPV Micro 0.61905
Phi-Squared 0.49722
RCI 0.34536
RR 7.0
Reference Entropy 1.37878
Response Entropy 1.44117
SOA1(Landis & Koch) Fair
SOA2(Fleiss) Poor
SOA3(Altman) Fair
SOA4(Cicchetti) Poor
Scott PI 0.37313
Standard Error 0.10597
TPR Macro 0.61111
TPR Micro 0.61905
Zero-one Loss 8
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 0 1 2
ACC(Accuracy) 0.80952 0.80952 0.61905
AUC(Area under the roc curve) 0.86667 0.61111 0.63889
AUCI(Auc value interpretation) Very Good Fair Fair
BM(Informedness or bookmaker informedness) 0.73333 0.22222 0.27778
CEN(Confusion entropy) 0.25 0.52832 0.56439
DOR(Diagnostic odds ratio) None 4.0 3.5
DP(Discriminant power) None 0.33193 0.29996
DPI(Discriminant power interpretation) None Poor Poor
ERR(Error rate) 0.19048 0.19048 0.38095
F0.5(F0.5 score) 0.65217 0.33333 0.68182
F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.33333 0.6
F2(F2 score) 0.88235 0.33333 0.53571
FDR(False discovery rate) 0.4 0.66667 0.25
FN(False negative/miss/type 2 error) 0 2 6
FNR(Miss rate or false negative rate) 0.0 0.66667 0.5
FOR(False omission rate) 0.0 0.11111 0.46154
FP(False positive/type 1 error/false alarm) 4 2 2
FPR(Fall-out or false positive rate) 0.26667 0.11111 0.22222
G(G-measure geometric mean of precision and sensitivity) 0.7746 0.33333 0.61237
GI(Gini index) 0.73333 0.22222 0.27778
IS(Information score) 1.07039 1.22239 0.39232
J(Jaccard index) 0.6 0.2 0.42857
LS(Lift score) 2.1 2.33333 1.3125
MCC(Matthews correlation coefficient) 0.66332 0.22222 0.28307
MCEN(Modified confusion entropy) 0.26439 0.52877 0.65924
MK(Markedness) 0.6 0.22222 0.28846
N(Condition negative) 15 18 9
NLR(Negative likelihood ratio) 0.0 0.75 0.64286
NPV(Negative predictive value) 1.0 0.88889 0.53846
P(Condition positive or support) 6 3 12
PLR(Positive likelihood ratio) 3.75 3.0 2.25
PLRI(Positive likelihood ratio interpretation) Poor Poor Poor
POP(Population) 21 21 21
PPV(Precision or positive predictive value) 0.6 0.33333 0.75
PRE(Prevalence) 0.28571 0.14286 0.57143
RACC(Random accuracy) 0.13605 0.02041 0.21769
RACCU(Random accuracy unbiased) 0.14512 0.02041 0.22676
TN(True negative/correct rejection) 11 16 7
TNR(Specificity or true negative rate) 0.73333 0.88889 0.77778
TON(Test outcome negative) 11 18 13
TOP(Test outcome positive) 10 3 8
TP(True positive/hit) 6 1 6
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5
Y(Youden index) 0.73333 0.22222 0.27778
dInd(Distance index) 0.26667 0.67586 0.54716
sInd(Similarity index) 0.81144 0.52209 0.6131
>>> NIR_calc({'Class2': 804, 'Class1': 196},1000) # Verified Case
0.804
>>> cm = ConfusionMatrix(matrix={0:{0:3,1:1},1:{0:4,1:2}}) # Verified Case
>>> cm.LS[1]
1.1111111111111112
>>> cm.LS[0]
1.0714285714285714
>>> cm = ConfusionMatrix(matrix={"Class1":{"Class1":183,"Class2":13},"Class2":{"Class1":141,"Class2":663}}) # Verified Case
>>> cm.PValue
0.000342386296143693
>>> cm = ConfusionMatrix(matrix={"Class1":{"Class1":4,"Class2":2},"Class2":{"Class1":2,"Class2":4}}) # Verified Case
>>> cm.Overall_CEN
0.861654166907052
>>> cm.Overall_MCEN
0.6666666666666666
>>> cm.IS["Class1"]
0.4150374992788437
>>> cm.IS["Class2"]
0.4150374992788437
>>> cm = ConfusionMatrix(matrix={1:{1:5,2:0,3:0},2:{1:0,2:10,3:0},3:{1:0,2:300,3:0}}) # Verified Case
>>> cm.Overall_CEN
0.022168905807495587
>>> cm.Overall_MCC
0.3012440235352457
>>> cm.CBA
0.3440860215053763
>>> cm = ConfusionMatrix(matrix={1:{1:1,2:3,3:0,4:0},2:{1:9,2:1,3:0,4:0},3:{1:0,2:0,3:100,4:0},4:{1:0,2:0,3:0,4:200}}) # Verified Case
>>> cm.RCI
0.9785616782831341
>>> cm = ConfusionMatrix(matrix={1:{1:1,2:0,3:3},2:{1:0,2:100,3:0},3:{1:0,2:0,3:200}}) # Verified Case
>>> cm.RCI
0.9264007150415143
>>> cm = ConfusionMatrix(matrix={1:{1:5,2:0,3:0},2:{1:0,2:10,3:0},3:{1:0,2:300,3:0}})
>>> cm.RCI
0.3675708571923818
>>> cm = ConfusionMatrix(matrix={1:{1:12806,2:26332},2:{1:5484,2:299777}},transpose=True) # Verified Case
>>> cm.AUC[1]
0.8097090079101759
>>> cm.GI[1]
0.6194180158203517
>>> cm.Overall_ACC
0.9076187793808925
>>> cm.DP[1]
0.7854399677022138
>>> cm.Y[1]
0.6194180158203517
>>> cm.BM[1]
0.6194180158203517
>>> cm = ConfusionMatrix(matrix={1:{1:13182,2:30516},2:{1:5108,2:295593}},transpose=True) # Verified Case
>>> cm.AUC[1]
0.8135728157964055
>>> cm.GI[1]
0.627145631592811
>>> cm.Overall_ACC
0.896561836706843
>>> cm.DP[1]
0.770700985610517
>>> cm.Y[1]
0.627145631592811
>>> cm.BM[1]
0.627145631592811
>>> save_obj = cm.save_obj("test4",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file=ConfusionMatrix(file=open("test4.obj","r"))
>>> cm_file.DP[1]
0.770700985610517
>>> cm_file.Y[1]
0.627145631592811
>>> cm_file.BM[1]
0.627145631592811
>>> cm_file.transpose
True
>>> json.dump({"Actual-Vector": None, "Digit": 5, "Predict-Vector": None, "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}, "Transpose": True,"Sample-Weight": None},open("test5.obj","w"))
>>> cm_file=ConfusionMatrix(file=open("test5.obj","r"))
>>> cm_file.transpose
True
>>> cm_file.matrix == {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}
True
>>> cm = ConfusionMatrix([1,2,3,4],[1,2,3,"4"])
>>> cm
pycm.ConfusionMatrix(classes: ['1', '2', '3', '4'])
>>> os.remove("test.csv")
>>> os.remove("test.obj")
>>> os.remove("test.html")
>>> os.remove("test_filtered.html")
>>> os.remove("test_filtered.csv")
>>> os.remove("test_filtered.pycm")
>>> os.remove("test_filtered2.html")
>>> os.remove("test_filtered3.html")
>>> os.remove("test_filtered4.html")
>>> os.remove("test_filtered5.html")
>>> os.remove("test_colored.html")
>>> os.remove("test_filtered2.csv")
>>> os.remove("test_filtered3.csv")
>>> os.remove("test_filtered4.csv")
>>> os.remove("test_filtered2.pycm")
>>> os.remove("test_filtered3.pycm")
>>> os.remove("test2.obj")
>>> os.remove("test3.obj")
>>> os.remove("test4.obj")
>>> os.remove("test5.obj")
>>> os.remove("test.pycm")
'''
|
"""
>>> from pycm import *
>>> import os
>>> import json
>>> y_actu = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]
>>> y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]
>>> cm = ConfusionMatrix(y_actu, y_pred)
>>> cm
pycm.ConfusionMatrix(classes: [0, 1, 2])
>>> len(cm)
3
>>> print(cm)
Predict 0 1 2
Actual
0 3 0 0
<BLANKLINE>
1 0 1 2
<BLANKLINE>
2 2 1 3
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.30439,0.86228)
AUNP 0.66667
AUNU 0.69444
Bennett S 0.375
CBA 0.47778
Chi-Squared 6.6
Chi-Squared DF 4
Conditional Entropy 0.95915
Cramer V 0.5244
Cross Entropy 1.59352
Gwet AC1 0.38931
Hamming Loss 0.41667
Joint Entropy 2.45915
KL Divergence 0.09352
Kappa 0.35484
Kappa 95% CI (-0.07708,0.78675)
Kappa No Prevalence 0.16667
Kappa Standard Error 0.22036
Kappa Unbiased 0.34426
Lambda A 0.16667
Lambda B 0.42857
Mutual Information 0.52421
NIR 0.5
Overall ACC 0.58333
Overall CEN 0.46381
Overall J (1.225,0.40833)
Overall MCC 0.36667
Overall MCEN 0.51894
Overall RACC 0.35417
Overall RACCU 0.36458
P-Value 0.38721
PPV Macro 0.56667
PPV Micro 0.58333
Phi-Squared 0.55
RCI 0.34947
RR 4.0
Reference Entropy 1.5
Response Entropy 1.48336
SOA1(Landis & Koch) Fair
SOA2(Fleiss) Poor
SOA3(Altman) Fair
SOA4(Cicchetti) Poor
Scott PI 0.34426
Standard Error 0.14232
TPR Macro 0.61111
TPR Micro 0.58333
Zero-one Loss 5
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 0 1 2
ACC(Accuracy) 0.83333 0.75 0.58333
AUC(Area under the roc curve) 0.88889 0.61111 0.58333
AUCI(Auc value interpretation) Very Good Fair Poor
BM(Informedness or bookmaker informedness) 0.77778 0.22222 0.16667
CEN(Confusion entropy) 0.25 0.49658 0.60442
DOR(Diagnostic odds ratio) None 4.0 2.0
DP(Discriminant power) None 0.33193 0.16597
DPI(Discriminant power interpretation) None Poor Poor
ERR(Error rate) 0.16667 0.25 0.41667
F0.5(F0.5 score) 0.65217 0.45455 0.57692
F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.4 0.54545
F2(F2 score) 0.88235 0.35714 0.51724
FDR(False discovery rate) 0.4 0.5 0.4
FN(False negative/miss/type 2 error) 0 2 3
FNR(Miss rate or false negative rate) 0.0 0.66667 0.5
FOR(False omission rate) 0.0 0.2 0.42857
FP(False positive/type 1 error/false alarm) 2 1 2
FPR(Fall-out or false positive rate) 0.22222 0.11111 0.33333
G(G-measure geometric mean of precision and sensitivity) 0.7746 0.40825 0.54772
GI(Gini index) 0.77778 0.22222 0.16667
IS(Information score) 1.26303 1.0 0.26303
J(Jaccard index) 0.6 0.25 0.375
LS(Lift score) 2.4 2.0 1.2
MCC(Matthews correlation coefficient) 0.68313 0.2582 0.16903
MCEN(Modified confusion entropy) 0.26439 0.5 0.6875
MK(Markedness) 0.6 0.3 0.17143
N(Condition negative) 9 9 6
NLR(Negative likelihood ratio) 0.0 0.75 0.75
NPV(Negative predictive value) 1.0 0.8 0.57143
P(Condition positive or support) 3 3 6
PLR(Positive likelihood ratio) 4.5 3.0 1.5
PLRI(Positive likelihood ratio interpretation) Poor Poor Poor
POP(Population) 12 12 12
PPV(Precision or positive predictive value) 0.6 0.5 0.6
PRE(Prevalence) 0.25 0.25 0.5
RACC(Random accuracy) 0.10417 0.04167 0.20833
RACCU(Random accuracy unbiased) 0.11111 0.0434 0.21007
TN(True negative/correct rejection) 7 8 4
TNR(Specificity or true negative rate) 0.77778 0.88889 0.66667
TON(Test outcome negative) 7 10 7
TOP(Test outcome positive) 5 2 5
TP(True positive/hit) 3 1 3
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5
Y(Youden index) 0.77778 0.22222 0.16667
dInd(Distance index) 0.22222 0.67586 0.60093
sInd(Similarity index) 0.84287 0.52209 0.57508
<BLANKLINE>
>>> cm.relabel({0:"L1",1:"L2",2:"L3"})
>>> print(cm)
Predict L1 L2 L3
Actual
L1 3 0 0
<BLANKLINE>
L2 0 1 2
<BLANKLINE>
L3 2 1 3
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.30439,0.86228)
AUNP 0.66667
AUNU 0.69444
Bennett S 0.375
CBA 0.47778
Chi-Squared 6.6
Chi-Squared DF 4
Conditional Entropy 0.95915
Cramer V 0.5244
Cross Entropy 1.59352
Gwet AC1 0.38931
Hamming Loss 0.41667
Joint Entropy 2.45915
KL Divergence 0.09352
Kappa 0.35484
Kappa 95% CI (-0.07708,0.78675)
Kappa No Prevalence 0.16667
Kappa Standard Error 0.22036
Kappa Unbiased 0.34426
Lambda A 0.16667
Lambda B 0.42857
Mutual Information 0.52421
NIR 0.5
Overall ACC 0.58333
Overall CEN 0.46381
Overall J (1.225,0.40833)
Overall MCC 0.36667
Overall MCEN 0.51894
Overall RACC 0.35417
Overall RACCU 0.36458
P-Value 0.38721
PPV Macro 0.56667
PPV Micro 0.58333
Phi-Squared 0.55
RCI 0.34947
RR 4.0
Reference Entropy 1.5
Response Entropy 1.48336
SOA1(Landis & Koch) Fair
SOA2(Fleiss) Poor
SOA3(Altman) Fair
SOA4(Cicchetti) Poor
Scott PI 0.34426
Standard Error 0.14232
TPR Macro 0.61111
TPR Micro 0.58333
Zero-one Loss 5
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes L1 L2 L3
ACC(Accuracy) 0.83333 0.75 0.58333
AUC(Area under the roc curve) 0.88889 0.61111 0.58333
AUCI(Auc value interpretation) Very Good Fair Poor
BM(Informedness or bookmaker informedness) 0.77778 0.22222 0.16667
CEN(Confusion entropy) 0.25 0.49658 0.60442
DOR(Diagnostic odds ratio) None 4.0 2.0
DP(Discriminant power) None 0.33193 0.16597
DPI(Discriminant power interpretation) None Poor Poor
ERR(Error rate) 0.16667 0.25 0.41667
F0.5(F0.5 score) 0.65217 0.45455 0.57692
F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.4 0.54545
F2(F2 score) 0.88235 0.35714 0.51724
FDR(False discovery rate) 0.4 0.5 0.4
FN(False negative/miss/type 2 error) 0 2 3
FNR(Miss rate or false negative rate) 0.0 0.66667 0.5
FOR(False omission rate) 0.0 0.2 0.42857
FP(False positive/type 1 error/false alarm) 2 1 2
FPR(Fall-out or false positive rate) 0.22222 0.11111 0.33333
G(G-measure geometric mean of precision and sensitivity) 0.7746 0.40825 0.54772
GI(Gini index) 0.77778 0.22222 0.16667
IS(Information score) 1.26303 1.0 0.26303
J(Jaccard index) 0.6 0.25 0.375
LS(Lift score) 2.4 2.0 1.2
MCC(Matthews correlation coefficient) 0.68313 0.2582 0.16903
MCEN(Modified confusion entropy) 0.26439 0.5 0.6875
MK(Markedness) 0.6 0.3 0.17143
N(Condition negative) 9 9 6
NLR(Negative likelihood ratio) 0.0 0.75 0.75
NPV(Negative predictive value) 1.0 0.8 0.57143
P(Condition positive or support) 3 3 6
PLR(Positive likelihood ratio) 4.5 3.0 1.5
PLRI(Positive likelihood ratio interpretation) Poor Poor Poor
POP(Population) 12 12 12
PPV(Precision or positive predictive value) 0.6 0.5 0.6
PRE(Prevalence) 0.25 0.25 0.5
RACC(Random accuracy) 0.10417 0.04167 0.20833
RACCU(Random accuracy unbiased) 0.11111 0.0434 0.21007
TN(True negative/correct rejection) 7 8 4
TNR(Specificity or true negative rate) 0.77778 0.88889 0.66667
TON(Test outcome negative) 7 10 7
TOP(Test outcome positive) 5 2 5
TP(True positive/hit) 3 1 3
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5
Y(Youden index) 0.77778 0.22222 0.16667
dInd(Distance index) 0.22222 0.67586 0.60093
sInd(Similarity index) 0.84287 0.52209 0.57508
<BLANKLINE>
>>> cm.Y["L2"]
0.2222222222222221
>>> cm_2 = ConfusionMatrix(y_actu, 2)
Traceback (most recent call last):
...
pycm.pycm_obj.pycmVectorError: The type of input vectors is assumed to be a list or a NumPy array
>>> cm_3 = ConfusionMatrix(y_actu, [1,2])
Traceback (most recent call last):
...
pycm.pycm_obj.pycmVectorError: Input vectors must have same length
>>> cm_4 = ConfusionMatrix([], [])
Traceback (most recent call last):
...
pycm.pycm_obj.pycmVectorError: Input vectors are empty
>>> cm_5 = ConfusionMatrix([1,1,1,], [1,1,1,1])
Traceback (most recent call last):
...
pycm.pycm_obj.pycmVectorError: Input vectors must have same length
>>> pycm_help()
<BLANKLINE>
PyCM is a multi-class confusion matrix library written in Python that
supports both input data vectors and direct matrix, and a proper tool for
post-classification model evaluation that supports most classes and overall
statistics parameters.
PyCM is the swiss-army knife of confusion matrices, targeted mainly at
data scientists that need a broad array of metrics for predictive models
and an accurate evaluation of large variety of classifiers.
<BLANKLINE>
Repo : https://github.com/sepandhaghighi/pycm
Webpage : http://www.pycm.ir
<BLANKLINE>
<BLANKLINE>
>>> RCI_calc(24,0)
'None'
>>> CBA_calc([1,2], {1:{1:0,2:0},2:{1:0,2:0}}, {1:0,2:0}, {1:0,2:0})
'None'
>>> RR_calc([], {1:0,2:0})
'None'
>>> overall_MCC_calc([1,2], {1:{1:0,2:0},2:{1:0,2:0}}, {1:0,2:0}, {1:0,2:0})
'None'
>>> CEN_misclassification_calc({1:{1:0,2:0},2:{1:0,2:0}},{1:0,2:0},{1:0,2:0},1,1,2)
'None'
>>> vector_check([1,2,3,0.4])
False
>>> vector_check([1,2,3,-2])
False
>>> matrix_check({1:{1:0.5,2:0},2:{1:0,2:0}})
False
>>> matrix_check([])
False
>>> TTPN_calc(0,0)
'None'
>>> TTPN_calc(1,4)
0.2
>>> FXR_calc(None)
'None'
>>> FXR_calc(0.2)
0.8
>>> ACC_calc(0,0,0,0)
'None'
>>> ACC_calc(1,1,3,4)
0.2222222222222222
>>> MCC_calc(0,2,0,2)
'None'
>>> MCC_calc(1,2,3,4)
-0.408248290463863
>>> LR_calc(1,2)
0.5
>>> LR_calc(1,0)
'None'
>>> MK_BM_calc(2,"None")
'None'
>>> MK_BM_calc(1,2)
2
>>> PRE_calc(None,2)
'None'
>>> PRE_calc(1,5)
0.2
>>> PRE_calc(1,0)
'None'
>>> G_calc(None,2)
'None'
>>> G_calc(1,2)
1.4142135623730951
>>> RACC_calc(2,3,4)
0.375
>>> reliability_calc(1,None)
'None'
>>> reliability_calc(2,0.3)
1.7
>>> micro_calc({1:2,2:3},{1:1,2:4})
0.5
>>> micro_calc({1:2,2:3},None)
'None'
>>> macro_calc(None)
'None'
>>> macro_calc({1:2,2:3})
2.5
>>> F_calc(TP=0,FP=0,FN=0,Beta=1)
'None'
>>> F_calc(TP=3,FP=2,FN=1,Beta=5)
0.7428571428571429
>>> save_stat=cm.save_stat("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered2",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=["L1","L2"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("test_filtered3",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_stat("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.pycm'"}
True
>>> ERR_calc(None)
'None'
>>> ERR_calc(0.1)
0.9
>>> cm.F_beta(4)["L1"]
0.9622641509433962
>>> cm.F_beta(4)["L2"]
0.34
>>> cm.F_beta(4)["L3"]
0.504950495049505
>>> import numpy as np
>>> y_test = np.array([600, 200, 200, 200, 200, 200, 200, 200, 500, 500, 500, 200, 200, 200, 200, 200, 200, 200, 200, 200])
>>> y_pred = np.array([100, 200, 200, 100, 100, 200, 200, 200, 100, 200, 500, 100, 100, 100, 100, 100, 100, 100, 500, 200])
>>> cm=ConfusionMatrix(y_test, y_pred)
>>> print(cm)
Predict 100 200 500 600
Actual
100 0 0 0 0
<BLANKLINE>
200 9 6 1 0
<BLANKLINE>
500 1 1 1 0
<BLANKLINE>
600 1 0 0 0
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.14096,0.55904)
AUNP None
AUNU None
Bennett S 0.13333
CBA 0.17708
Chi-Squared None
Chi-Squared DF 9
Conditional Entropy 1.23579
Cramer V None
Cross Entropy 1.70995
Gwet AC1 0.19505
Hamming Loss 0.65
Joint Entropy 2.11997
KL Divergence None
Kappa 0.07801
Kappa 95% CI (-0.2185,0.37453)
Kappa No Prevalence -0.3
Kappa Standard Error 0.15128
Kappa Unbiased -0.12554
Lambda A 0.0
Lambda B 0.0
Mutual Information 0.10088
NIR 0.8
Overall ACC 0.35
Overall CEN 0.3648
Overall J (0.60294,0.15074)
Overall MCC 0.12642
Overall MCEN 0.37463
Overall RACC 0.295
Overall RACCU 0.4225
P-Value 1.0
PPV Macro None
PPV Micro 0.35
Phi-Squared None
RCI 0.11409
RR 5.0
Reference Entropy 0.88418
Response Entropy 1.33667
SOA1(Landis & Koch) Slight
SOA2(Fleiss) Poor
SOA3(Altman) Poor
SOA4(Cicchetti) Poor
Scott PI -0.12554
Standard Error 0.10665
TPR Macro None
TPR Micro 0.35
Zero-one Loss 13
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100 200 500 600
ACC(Accuracy) 0.45 0.45 0.85 0.95
AUC(Area under the roc curve) None 0.5625 0.63725 0.5
AUCI(Auc value interpretation) None Poor Fair Poor
BM(Informedness or bookmaker informedness) None 0.125 0.27451 0.0
CEN(Confusion entropy) 0.33496 0.35708 0.53895 0.0
DOR(Diagnostic odds ratio) None 1.8 8.0 None
DP(Discriminant power) None 0.14074 0.4979 None
DPI(Discriminant power interpretation) None Poor Poor None
ERR(Error rate) 0.55 0.55 0.15 0.05
F0.5(F0.5 score) 0.0 0.68182 0.45455 0.0
F1(F1 score - harmonic mean of precision and sensitivity) 0.0 0.52174 0.4 0.0
F2(F2 score) 0.0 0.42254 0.35714 0.0
FDR(False discovery rate) 1.0 0.14286 0.5 None
FN(False negative/miss/type 2 error) 0 10 2 1
FNR(Miss rate or false negative rate) None 0.625 0.66667 1.0
FOR(False omission rate) 0.0 0.76923 0.11111 0.05
FP(False positive/type 1 error/false alarm) 11 1 1 0
FPR(Fall-out or false positive rate) 0.55 0.25 0.05882 0.0
G(G-measure geometric mean of precision and sensitivity) None 0.56695 0.40825 None
GI(Gini index) None 0.125 0.27451 0.0
IS(Information score) None 0.09954 1.73697 None
J(Jaccard index) 0.0 0.35294 0.25 0.0
LS(Lift score) None 1.07143 3.33333 None
MCC(Matthews correlation coefficient) None 0.10483 0.32673 None
MCEN(Modified confusion entropy) 0.33496 0.37394 0.58028 0.0
MK(Markedness) 0.0 0.08791 0.38889 None
N(Condition negative) 20 4 17 19
NLR(Negative likelihood ratio) None 0.83333 0.70833 1.0
NPV(Negative predictive value) 1.0 0.23077 0.88889 0.95
P(Condition positive or support) 0 16 3 1
PLR(Positive likelihood ratio) None 1.5 5.66667 None
PLRI(Positive likelihood ratio interpretation) None Poor Fair None
POP(Population) 20 20 20 20
PPV(Precision or positive predictive value) 0.0 0.85714 0.5 None
PRE(Prevalence) 0.0 0.8 0.15 0.05
RACC(Random accuracy) 0.0 0.28 0.015 0.0
RACCU(Random accuracy unbiased) 0.07563 0.33062 0.01562 0.00063
TN(True negative/correct rejection) 9 3 16 19
TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0
TON(Test outcome negative) 9 13 18 20
TOP(Test outcome positive) 11 7 2 0
TP(True positive/hit) 0 6 1 0
TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0
Y(Youden index) None 0.125 0.27451 0.0
dInd(Distance index) None 0.67315 0.66926 1.0
sInd(Similarity index) None 0.52401 0.52676 0.29289
<BLANKLINE>
>>> cm.stat()
Overall Statistics :
<BLANKLINE>
95% CI (0.14096,0.55904)
AUNP None
AUNU None
Bennett S 0.13333
CBA 0.17708
Chi-Squared None
Chi-Squared DF 9
Conditional Entropy 1.23579
Cramer V None
Cross Entropy 1.70995
Gwet AC1 0.19505
Hamming Loss 0.65
Joint Entropy 2.11997
KL Divergence None
Kappa 0.07801
Kappa 95% CI (-0.2185,0.37453)
Kappa No Prevalence -0.3
Kappa Standard Error 0.15128
Kappa Unbiased -0.12554
Lambda A 0.0
Lambda B 0.0
Mutual Information 0.10088
NIR 0.8
Overall ACC 0.35
Overall CEN 0.3648
Overall J (0.60294,0.15074)
Overall MCC 0.12642
Overall MCEN 0.37463
Overall RACC 0.295
Overall RACCU 0.4225
P-Value 1.0
PPV Macro None
PPV Micro 0.35
Phi-Squared None
RCI 0.11409
RR 5.0
Reference Entropy 0.88418
Response Entropy 1.33667
SOA1(Landis & Koch) Slight
SOA2(Fleiss) Poor
SOA3(Altman) Poor
SOA4(Cicchetti) Poor
Scott PI -0.12554
Standard Error 0.10665
TPR Macro None
TPR Micro 0.35
Zero-one Loss 13
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100 200 500 600
ACC(Accuracy) 0.45 0.45 0.85 0.95
AUC(Area under the roc curve) None 0.5625 0.63725 0.5
AUCI(Auc value interpretation) None Poor Fair Poor
BM(Informedness or bookmaker informedness) None 0.125 0.27451 0.0
CEN(Confusion entropy) 0.33496 0.35708 0.53895 0.0
DOR(Diagnostic odds ratio) None 1.8 8.0 None
DP(Discriminant power) None 0.14074 0.4979 None
DPI(Discriminant power interpretation) None Poor Poor None
ERR(Error rate) 0.55 0.55 0.15 0.05
F0.5(F0.5 score) 0.0 0.68182 0.45455 0.0
F1(F1 score - harmonic mean of precision and sensitivity) 0.0 0.52174 0.4 0.0
F2(F2 score) 0.0 0.42254 0.35714 0.0
FDR(False discovery rate) 1.0 0.14286 0.5 None
FN(False negative/miss/type 2 error) 0 10 2 1
FNR(Miss rate or false negative rate) None 0.625 0.66667 1.0
FOR(False omission rate) 0.0 0.76923 0.11111 0.05
FP(False positive/type 1 error/false alarm) 11 1 1 0
FPR(Fall-out or false positive rate) 0.55 0.25 0.05882 0.0
G(G-measure geometric mean of precision and sensitivity) None 0.56695 0.40825 None
GI(Gini index) None 0.125 0.27451 0.0
IS(Information score) None 0.09954 1.73697 None
J(Jaccard index) 0.0 0.35294 0.25 0.0
LS(Lift score) None 1.07143 3.33333 None
MCC(Matthews correlation coefficient) None 0.10483 0.32673 None
MCEN(Modified confusion entropy) 0.33496 0.37394 0.58028 0.0
MK(Markedness) 0.0 0.08791 0.38889 None
N(Condition negative) 20 4 17 19
NLR(Negative likelihood ratio) None 0.83333 0.70833 1.0
NPV(Negative predictive value) 1.0 0.23077 0.88889 0.95
P(Condition positive or support) 0 16 3 1
PLR(Positive likelihood ratio) None 1.5 5.66667 None
PLRI(Positive likelihood ratio interpretation) None Poor Fair None
POP(Population) 20 20 20 20
PPV(Precision or positive predictive value) 0.0 0.85714 0.5 None
PRE(Prevalence) 0.0 0.8 0.15 0.05
RACC(Random accuracy) 0.0 0.28 0.015 0.0
RACCU(Random accuracy unbiased) 0.07563 0.33062 0.01562 0.00063
TN(True negative/correct rejection) 9 3 16 19
TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0
TON(Test outcome negative) 9 13 18 20
TOP(Test outcome positive) 11 7 2 0
TP(True positive/hit) 0 6 1 0
TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0
Y(Youden index) None 0.125 0.27451 0.0
dInd(Distance index) None 0.67315 0.66926 1.0
sInd(Similarity index) None 0.52401 0.52676 0.29289
<BLANKLINE>
>>> cm.stat(overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"])
Overall Statistics :
<BLANKLINE>
Kappa 0.07801
Scott PI -0.12554
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100 200 500 600
ACC(Accuracy) 0.45 0.45 0.85 0.95
AUC(Area under the roc curve) None 0.5625 0.63725 0.5
TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0
TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0
<BLANKLINE>
>>> cm.stat(overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
Overall Statistics :
<BLANKLINE>
Kappa 0.07801
Scott PI -0.12554
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100
ACC(Accuracy) 0.45
AUC(Area under the roc curve) None
TNR(Specificity or true negative rate) 0.45
TPR(Sensitivity, recall, hit rate, or true positive rate) None
<BLANKLINE>
>>> cm.stat(overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[])
Overall Statistics :
<BLANKLINE>
Kappa 0.07801
Scott PI -0.12554
<BLANKLINE>
>>> cm.stat(overall_param=["Kappa","Scott PI"],class_param=[],class_name=[100])
Overall Statistics :
<BLANKLINE>
Kappa 0.07801
Scott PI -0.12554
<BLANKLINE>
>>> cm.stat(overall_param=["Kappa","Scott PI"],class_param=["TPR"],class_name=[100])
Overall Statistics :
<BLANKLINE>
Kappa 0.07801
Scott PI -0.12554
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100
TPR(Sensitivity, recall, hit rate, or true positive rate) None
<BLANKLINE>
>>> cm.stat(overall_param=[],class_param=["TPR"],class_name=[100])
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100
TPR(Sensitivity, recall, hit rate, or true positive rate) None
<BLANKLINE>
>>> cm.print_normalized_matrix()
Predict 100 200 500 600
Actual
100 0.0 0.0 0.0 0.0
200 0.5625 0.375 0.0625 0.0
500 0.33333 0.33333 0.33333 0.0
600 1.0 0.0 0.0 0.0
<BLANKLINE>
>>> cm.print_matrix()
Predict 100 200 500 600
Actual
100 0 0 0 0
200 9 6 1 0
500 1 1 1 0
600 1 0 0 0
<BLANKLINE>
>>> cm.print_matrix(one_vs_all=True,class_name=200)
Predict 200 ~
Actual
200 6 10
~ 1 3
<BLANKLINE>
>>> cm.print_normalized_matrix(one_vs_all=True,class_name=200)
Predict 200 ~
Actual
200 0.375 0.625
~ 0.25 0.75
<BLANKLINE>
>>> kappa_analysis_koch(-0.1)
'Poor'
>>> kappa_analysis_koch(0)
'Slight'
>>> kappa_analysis_koch(0.2)
'Fair'
>>> kappa_analysis_koch(0.4)
'Moderate'
>>> kappa_analysis_koch(0.6)
'Substantial'
>>> kappa_analysis_koch(0.8)
'Almost Perfect'
>>> kappa_analysis_koch(1.2)
'None'
>>> kappa_analysis_fleiss(0.4)
'Intermediate to Good'
>>> kappa_analysis_fleiss(0.75)
'Excellent'
>>> kappa_analysis_fleiss(1.2)
'Excellent'
>>> kappa_analysis_altman(-0.2)
'Poor'
>>> kappa_analysis_altman(0.2)
'Fair'
>>> kappa_analysis_altman(0.4)
'Moderate'
>>> kappa_analysis_altman(0.6)
'Good'
>>> kappa_analysis_altman(0.8)
'Very Good'
>>> kappa_analysis_altman(1.2)
'None'
>>> kappa_analysis_fleiss(0.2)
'Poor'
>>> kappa_analysis_cicchetti(0.3)
'Poor'
>>> kappa_analysis_cicchetti(0.5)
'Fair'
>>> kappa_analysis_cicchetti(0.65)
'Good'
>>> kappa_analysis_cicchetti(0.8)
'Excellent'
>>> PLR_analysis(1)
'Negligible'
>>> PLR_analysis(3)
'Poor'
>>> PLR_analysis(7)
'Fair'
>>> PLR_analysis(11)
'Good'
>>> DP_analysis(0.2)
'Poor'
>>> DP_analysis(1.5)
'Limited'
>>> DP_analysis(2.5)
'Fair'
>>> DP_analysis(10)
'Good'
>>> AUC_analysis(0.5)
'Poor'
>>> AUC_analysis(0.65)
'Fair'
>>> AUC_analysis(0.75)
'Good'
>>> AUC_analysis(0.86)
'Very Good'
>>> AUC_analysis(0.97)
'Excellent'
>>> AUC_analysis(1.0)
'Excellent'
>>> PC_PI_calc(1,1,1)
'None'
>>> PC_PI_calc({1:12},{1:6},{1:45})
0.04000000000000001
>>> PC_AC1_calc(1,1,1)
'None'
>>> PC_AC1_calc({1:123,2:2},{1:120,2:5},{1:125,2:125})
0.05443200000000002
>>> y_act=[0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2]
>>> y_pre=[0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,1,1,1,1,1,2,0,1,2,2,2,2]
>>> cm2=ConfusionMatrix(y_act,y_pre)
>>> chi_squared=chi_square_calc(cm2.classes,cm2.table,cm2.TOP,cm2.P,cm2.POP)
>>> chi_squared
15.525641025641026
>>> population = list(cm2.POP.values())[0]
>>> phi_squared=phi_square_calc(chi_squared,population)
>>> phi_squared
0.5750237416904084
>>> V=cramers_V_calc(phi_squared,cm2.classes)
>>> V
0.5362013342441477
>>> DF=DF_calc(cm2.classes)
>>> DF
4
>>> SE=se_calc(cm2.Overall_ACC,population)
>>> SE
0.09072184232530289
>>> CI=CI_calc(cm2.Overall_ACC,SE)
>>> CI
(0.48885185570907297, 0.8444814776242603)
>>> response_entropy=entropy_calc(cm2.TOP,cm2.POP)
>>> response_entropy
1.486565953154142
>>> reference_entropy=entropy_calc(cm2.P,cm2.POP)
>>> reference_entropy
1.5304930567574824
>>> cross_entropy = cross_entropy_calc(cm2.TOP,cm2.P,cm2.POP)
>>> cross_entropy
1.5376219392005763
>>> join_entropy = joint_entropy_calc(cm2.classes,cm2.table,cm2.POP)
>>> join_entropy
2.619748965432189
>>> conditional_entropy = conditional_entropy_calc(cm2.classes,cm2.table,cm2.P,cm2.POP)
>>> conditional_entropy
1.089255908674706
>>> kl_divergence=kl_divergence_calc(cm2.P,cm2.TOP,cm2.POP)
>>> kl_divergence
0.007128882443093773
>>> lambda_B=lambda_B_calc(cm2.classes,cm2.table,cm2.TOP,population)
>>> lambda_B
0.35714285714285715
>>> lambda_A=lambda_A_calc(cm2.classes,cm2.table,cm2.P,population)
>>> lambda_A
0.4
>>> IS_calc(13,0,0,38)
1.5474877953024933
>>> kappa_no_prevalence_calc(cm2.Overall_ACC)
0.33333333333333326
>>> reliability_calc(cm2.Overall_RACC,cm2.Overall_ACC)
0.4740259740259741
>>> mutual_information_calc(cm2.ResponseEntropy,cm2.ConditionalEntropy)
0.39731004447943596
>>> cm3=ConfusionMatrix(matrix=cm2.table)
>>> cm3
pycm.ConfusionMatrix(classes: [0, 1, 2])
>>> cm3.CI
(0.48885185570907297, 0.8444814776242603)
>>> cm3.Chi_Squared
15.525641025641026
>>> cm3.Phi_Squared
0.5750237416904084
>>> cm3.V
0.5362013342441477
>>> cm3.DF
4
>>> cm3.ResponseEntropy
1.486565953154142
>>> cm3.ReferenceEntropy
1.5304930567574824
>>> cm3.CrossEntropy
1.5376219392005763
>>> cm3.JointEntropy
2.619748965432189
>>> cm3.ConditionalEntropy
1.089255908674706
>>> cm3.KL
0.007128882443093773
>>> cm3.LambdaA
0.4
>>> cm3.LambdaB
0.35714285714285715
>>> cm3=ConfusionMatrix(matrix={})
Traceback (most recent call last):
...
pycm.pycm_obj.pycmMatrixError: Input confusion matrix format error
>>> cm_4=ConfusionMatrix(matrix={1:{1:2,"1":2},"1":{1:2,"1":3}})
Traceback (most recent call last):
...
pycm.pycm_obj.pycmMatrixError: Type of the input matrix classes is assumed be the same
>>> cm_5=ConfusionMatrix(matrix={1:{1:2}})
Traceback (most recent call last):
...
pycm.pycm_obj.pycmVectorError: Number of the classes is lower than 2
>>> save_stat=cm.save_html("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered2",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered3",address=False,overall_param=["Kappa","Scott PI"],class_param=["TPR","TNR","ACC","AUC"],class_name=[])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered4",address=False,overall_param=["Kappa","Scott PI"],class_param=[],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_filtered5",address=False,overall_param=[],class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("test_colored",address=False,color=(130,100,200))
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test",address=False)
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered",address=False,class_param=["TPR","TNR","ACC","AUC"])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered2",address=False,class_param=["TPR","TNR","ACC","AUC"],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered3",address=False,class_param=["TPR","TNR","ACC","AUC"],class_name=[])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_csv("test_filtered4",address=False,class_param=[],class_name=[100])
>>> save_stat=={'Status': True, 'Message': None}
True
>>> save_stat=cm.save_html("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.html'"}
True
>>> save_stat=cm.save_csv("/asdasd,qweqwe.eo/",address=True)
>>> save_stat=={'Status': False, 'Message': "[Errno 2] No such file or directory: '/asdasd,qweqwe.eo/.csv'"}
True
>>> def activation(i):
... if i<0.7:
... return 1
... else:
... return 0
>>> cm_6 = ConfusionMatrix([0,0,1,0],[0.87,0.34,0.9,0.12],threshold=activation)
>>> cm_6.print_matrix()
Predict 0 1
Actual
0 1 2
1 1 0
>>> save_obj=cm.save_obj("test",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file=ConfusionMatrix(file=open("test.obj","r"))
>>> print(cm_file)
Predict 100 200 500 600
Actual
100 0 0 0 0
<BLANKLINE>
200 9 6 1 0
<BLANKLINE>
500 1 1 1 0
<BLANKLINE>
600 1 0 0 0
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.14096,0.55904)
AUNP None
AUNU None
Bennett S 0.13333
CBA 0.17708
Chi-Squared None
Chi-Squared DF 9
Conditional Entropy 1.23579
Cramer V None
Cross Entropy 1.70995
Gwet AC1 0.19505
Hamming Loss 0.65
Joint Entropy 2.11997
KL Divergence None
Kappa 0.07801
Kappa 95% CI (-0.2185,0.37453)
Kappa No Prevalence -0.3
Kappa Standard Error 0.15128
Kappa Unbiased -0.12554
Lambda A 0.0
Lambda B 0.0
Mutual Information 0.10088
NIR 0.8
Overall ACC 0.35
Overall CEN 0.3648
Overall J (0.60294,0.15074)
Overall MCC 0.12642
Overall MCEN 0.37463
Overall RACC 0.295
Overall RACCU 0.4225
P-Value 1.0
PPV Macro None
PPV Micro 0.35
Phi-Squared None
RCI 0.11409
RR 5.0
Reference Entropy 0.88418
Response Entropy 1.33667
SOA1(Landis & Koch) Slight
SOA2(Fleiss) Poor
SOA3(Altman) Poor
SOA4(Cicchetti) Poor
Scott PI -0.12554
Standard Error 0.10665
TPR Macro None
TPR Micro 0.35
Zero-one Loss 13
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 100 200 500 600
ACC(Accuracy) 0.45 0.45 0.85 0.95
AUC(Area under the roc curve) None 0.5625 0.63725 0.5
AUCI(Auc value interpretation) None Poor Fair Poor
BM(Informedness or bookmaker informedness) None 0.125 0.27451 0.0
CEN(Confusion entropy) 0.33496 0.35708 0.53895 0.0
DOR(Diagnostic odds ratio) None 1.8 8.0 None
DP(Discriminant power) None 0.14074 0.4979 None
DPI(Discriminant power interpretation) None Poor Poor None
ERR(Error rate) 0.55 0.55 0.15 0.05
F0.5(F0.5 score) 0.0 0.68182 0.45455 0.0
F1(F1 score - harmonic mean of precision and sensitivity) 0.0 0.52174 0.4 0.0
F2(F2 score) 0.0 0.42254 0.35714 0.0
FDR(False discovery rate) 1.0 0.14286 0.5 None
FN(False negative/miss/type 2 error) 0 10 2 1
FNR(Miss rate or false negative rate) None 0.625 0.66667 1.0
FOR(False omission rate) 0.0 0.76923 0.11111 0.05
FP(False positive/type 1 error/false alarm) 11 1 1 0
FPR(Fall-out or false positive rate) 0.55 0.25 0.05882 0.0
G(G-measure geometric mean of precision and sensitivity) None 0.56695 0.40825 None
GI(Gini index) None 0.125 0.27451 0.0
IS(Information score) None 0.09954 1.73697 None
J(Jaccard index) 0.0 0.35294 0.25 0.0
LS(Lift score) None 1.07143 3.33333 None
MCC(Matthews correlation coefficient) None 0.10483 0.32673 None
MCEN(Modified confusion entropy) 0.33496 0.37394 0.58028 0.0
MK(Markedness) 0.0 0.08791 0.38889 None
N(Condition negative) 20 4 17 19
NLR(Negative likelihood ratio) None 0.83333 0.70833 1.0
NPV(Negative predictive value) 1.0 0.23077 0.88889 0.95
P(Condition positive or support) 0 16 3 1
PLR(Positive likelihood ratio) None 1.5 5.66667 None
PLRI(Positive likelihood ratio interpretation) None Poor Fair None
POP(Population) 20 20 20 20
PPV(Precision or positive predictive value) 0.0 0.85714 0.5 None
PRE(Prevalence) 0.0 0.8 0.15 0.05
RACC(Random accuracy) 0.0 0.28 0.015 0.0
RACCU(Random accuracy unbiased) 0.07563 0.33062 0.01562 0.00063
TN(True negative/correct rejection) 9 3 16 19
TNR(Specificity or true negative rate) 0.45 0.75 0.94118 1.0
TON(Test outcome negative) 9 13 18 20
TOP(Test outcome positive) 11 7 2 0
TP(True positive/hit) 0 6 1 0
TPR(Sensitivity, recall, hit rate, or true positive rate) None 0.375 0.33333 0.0
Y(Youden index) None 0.125 0.27451 0.0
dInd(Distance index) None 0.67315 0.66926 1.0
sInd(Similarity index) None 0.52401 0.52676 0.29289
<BLANKLINE>
>>> save_obj=cm_6.save_obj("test2",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file_2=ConfusionMatrix(file=open("test2.obj","r"))
>>> cm_file_2.print_matrix()
Predict 0 1
Actual
0 1 2
1 1 0
>>> cm = ConfusionMatrix(matrix={"Class1":{"Class1":9,"Class2":3,"Class3":0},"Class2":{"Class1":3,"Class2":5,"Class3":1},"Class3":{"Class1":1,"Class2":1,"Class3":4}})
>>> print(cm)
Predict Class1 Class2 Class3
Actual
Class1 9 3 0
<BLANKLINE>
Class2 3 5 1
<BLANKLINE>
Class3 1 1 4
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.48885,0.84448)
AUNP 0.73175
AUNU 0.73929
Bennett S 0.5
CBA 0.63818
Chi-Squared 15.52564
Chi-Squared DF 4
Conditional Entropy 1.08926
Cramer V 0.5362
Cross Entropy 1.53762
Gwet AC1 0.51229
Hamming Loss 0.33333
Joint Entropy 2.61975
KL Divergence 0.00713
Kappa 0.47403
Kappa 95% CI (0.19345,0.7546)
Kappa No Prevalence 0.33333
Kappa Standard Error 0.14315
Kappa Unbiased 0.47346
Lambda A 0.4
Lambda B 0.35714
Mutual Information 0.39731
NIR 0.44444
Overall ACC 0.66667
Overall CEN 0.52986
Overall J (1.51854,0.50618)
Overall MCC 0.47511
Overall MCEN 0.65286
Overall RACC 0.36626
Overall RACCU 0.36694
P-Value 0.01667
PPV Macro 0.68262
PPV Micro 0.66667
Phi-Squared 0.57502
RCI 0.2596
RR 9.0
Reference Entropy 1.53049
Response Entropy 1.48657
SOA1(Landis & Koch) Moderate
SOA2(Fleiss) Intermediate to Good
SOA3(Altman) Moderate
SOA4(Cicchetti) Fair
Scott PI 0.47346
Standard Error 0.09072
TPR Macro 0.65741
TPR Micro 0.66667
Zero-one Loss 9
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes Class1 Class2 Class3
ACC(Accuracy) 0.74074 0.7037 0.88889
AUC(Area under the roc curve) 0.74167 0.66667 0.80952
AUCI(Auc value interpretation) Good Fair Very Good
BM(Informedness or bookmaker informedness) 0.48333 0.33333 0.61905
CEN(Confusion entropy) 0.45994 0.66249 0.47174
DOR(Diagnostic odds ratio) 8.25 4.375 40.0
DP(Discriminant power) 0.50527 0.35339 0.88326
DPI(Discriminant power interpretation) Poor Poor Poor
ERR(Error rate) 0.25926 0.2963 0.11111
F0.5(F0.5 score) 0.70312 0.55556 0.76923
F1(F1 score - harmonic mean of precision and sensitivity) 0.72 0.55556 0.72727
F2(F2 score) 0.7377 0.55556 0.68966
FDR(False discovery rate) 0.30769 0.44444 0.2
FN(False negative/miss/type 2 error) 3 4 2
FNR(Miss rate or false negative rate) 0.25 0.44444 0.33333
FOR(False omission rate) 0.21429 0.22222 0.09091
FP(False positive/type 1 error/false alarm) 4 4 1
FPR(Fall-out or false positive rate) 0.26667 0.22222 0.04762
G(G-measure geometric mean of precision and sensitivity) 0.72058 0.55556 0.7303
GI(Gini index) 0.48333 0.33333 0.61905
IS(Information score) 0.63941 0.73697 1.848
J(Jaccard index) 0.5625 0.38462 0.57143
LS(Lift score) 1.55769 1.66667 3.6
MCC(Matthews correlation coefficient) 0.48067 0.33333 0.66254
MCEN(Modified confusion entropy) 0.57782 0.77284 0.60158
MK(Markedness) 0.47802 0.33333 0.70909
N(Condition negative) 15 18 21
NLR(Negative likelihood ratio) 0.34091 0.57143 0.35
NPV(Negative predictive value) 0.78571 0.77778 0.90909
P(Condition positive or support) 12 9 6
PLR(Positive likelihood ratio) 2.8125 2.5 14.0
PLRI(Positive likelihood ratio interpretation) Poor Poor Good
POP(Population) 27 27 27
PPV(Precision or positive predictive value) 0.69231 0.55556 0.8
PRE(Prevalence) 0.44444 0.33333 0.22222
RACC(Random accuracy) 0.21399 0.11111 0.04115
RACCU(Random accuracy unbiased) 0.21433 0.11111 0.0415
TN(True negative/correct rejection) 11 14 20
TNR(Specificity or true negative rate) 0.73333 0.77778 0.95238
TON(Test outcome negative) 14 18 22
TOP(Test outcome positive) 13 9 5
TP(True positive/hit) 9 5 4
TPR(Sensitivity, recall, hit rate, or true positive rate) 0.75 0.55556 0.66667
Y(Youden index) 0.48333 0.33333 0.61905
dInd(Distance index) 0.36553 0.4969 0.33672
sInd(Similarity index) 0.74153 0.64864 0.7619
<BLANKLINE>
>>> cm = ConfusionMatrix(matrix={"Class1":{"Class1":9,"Class2":3,"Class3":1},"Class2":{"Class1":3,"Class2":5,"Class3":1},"Class3":{"Class1":0,"Class2":1,"Class3":4}},transpose=True)
>>> print(cm)
Predict Class1 Class2 Class3
Actual
Class1 9 3 0
<BLANKLINE>
Class2 3 5 1
<BLANKLINE>
Class3 1 1 4
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.48885,0.84448)
AUNP 0.73175
AUNU 0.73929
Bennett S 0.5
CBA 0.63818
Chi-Squared 15.52564
Chi-Squared DF 4
Conditional Entropy 1.08926
Cramer V 0.5362
Cross Entropy 1.53762
Gwet AC1 0.51229
Hamming Loss 0.33333
Joint Entropy 2.61975
KL Divergence 0.00713
Kappa 0.47403
Kappa 95% CI (0.19345,0.7546)
Kappa No Prevalence 0.33333
Kappa Standard Error 0.14315
Kappa Unbiased 0.47346
Lambda A 0.4
Lambda B 0.35714
Mutual Information 0.39731
NIR 0.44444
Overall ACC 0.66667
Overall CEN 0.52986
Overall J (1.51854,0.50618)
Overall MCC 0.47511
Overall MCEN 0.65286
Overall RACC 0.36626
Overall RACCU 0.36694
P-Value 0.01667
PPV Macro 0.68262
PPV Micro 0.66667
Phi-Squared 0.57502
RCI 0.2596
RR 9.0
Reference Entropy 1.53049
Response Entropy 1.48657
SOA1(Landis & Koch) Moderate
SOA2(Fleiss) Intermediate to Good
SOA3(Altman) Moderate
SOA4(Cicchetti) Fair
Scott PI 0.47346
Standard Error 0.09072
TPR Macro 0.65741
TPR Micro 0.66667
Zero-one Loss 9
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes Class1 Class2 Class3
ACC(Accuracy) 0.74074 0.7037 0.88889
AUC(Area under the roc curve) 0.74167 0.66667 0.80952
AUCI(Auc value interpretation) Good Fair Very Good
BM(Informedness or bookmaker informedness) 0.48333 0.33333 0.61905
CEN(Confusion entropy) 0.45994 0.66249 0.47174
DOR(Diagnostic odds ratio) 8.25 4.375 40.0
DP(Discriminant power) 0.50527 0.35339 0.88326
DPI(Discriminant power interpretation) Poor Poor Poor
ERR(Error rate) 0.25926 0.2963 0.11111
F0.5(F0.5 score) 0.70312 0.55556 0.76923
F1(F1 score - harmonic mean of precision and sensitivity) 0.72 0.55556 0.72727
F2(F2 score) 0.7377 0.55556 0.68966
FDR(False discovery rate) 0.30769 0.44444 0.2
FN(False negative/miss/type 2 error) 3 4 2
FNR(Miss rate or false negative rate) 0.25 0.44444 0.33333
FOR(False omission rate) 0.21429 0.22222 0.09091
FP(False positive/type 1 error/false alarm) 4 4 1
FPR(Fall-out or false positive rate) 0.26667 0.22222 0.04762
G(G-measure geometric mean of precision and sensitivity) 0.72058 0.55556 0.7303
GI(Gini index) 0.48333 0.33333 0.61905
IS(Information score) 0.63941 0.73697 1.848
J(Jaccard index) 0.5625 0.38462 0.57143
LS(Lift score) 1.55769 1.66667 3.6
MCC(Matthews correlation coefficient) 0.48067 0.33333 0.66254
MCEN(Modified confusion entropy) 0.57782 0.77284 0.60158
MK(Markedness) 0.47802 0.33333 0.70909
N(Condition negative) 15 18 21
NLR(Negative likelihood ratio) 0.34091 0.57143 0.35
NPV(Negative predictive value) 0.78571 0.77778 0.90909
P(Condition positive or support) 12 9 6
PLR(Positive likelihood ratio) 2.8125 2.5 14.0
PLRI(Positive likelihood ratio interpretation) Poor Poor Good
POP(Population) 27 27 27
PPV(Precision or positive predictive value) 0.69231 0.55556 0.8
PRE(Prevalence) 0.44444 0.33333 0.22222
RACC(Random accuracy) 0.21399 0.11111 0.04115
RACCU(Random accuracy unbiased) 0.21433 0.11111 0.0415
TN(True negative/correct rejection) 11 14 20
TNR(Specificity or true negative rate) 0.73333 0.77778 0.95238
TON(Test outcome negative) 14 18 22
TOP(Test outcome positive) 13 9 5
TP(True positive/hit) 9 5 4
TPR(Sensitivity, recall, hit rate, or true positive rate) 0.75 0.55556 0.66667
Y(Youden index) 0.48333 0.33333 0.61905
dInd(Distance index) 0.36553 0.4969 0.33672
sInd(Similarity index) 0.74153 0.64864 0.7619
<BLANKLINE>
>>> online_help(param=None)
Please choose one parameter :
<BLANKLINE>
Example : online_help("J") or online_help(2)
<BLANKLINE>
1-95% CI
2-ACC
3-AUC
4-AUCI
5-AUNP
6-AUNU
7-BM
8-Bennett S
9-CBA
10-CEN
11-Chi-Squared
12-Chi-Squared DF
13-Conditional Entropy
14-Cramer V
15-Cross Entropy
16-DOR
17-DP
18-DPI
19-ERR
20-F0.5
21-F1
22-F2
23-FDR
24-FN
25-FNR
26-FOR
27-FP
28-FPR
29-G
30-GI
31-Gwet AC1
32-Hamming Loss
33-IS
34-J
35-Joint Entropy
36-KL Divergence
37-Kappa
38-Kappa 95% CI
39-Kappa No Prevalence
40-Kappa Standard Error
41-Kappa Unbiased
42-LS
43-Lambda A
44-Lambda B
45-MCC
46-MCEN
47-MK
48-Mutual Information
49-N
50-NIR
51-NLR
52-NPV
53-Overall ACC
54-Overall CEN
55-Overall J
56-Overall MCC
57-Overall MCEN
58-Overall RACC
59-Overall RACCU
60-P
61-P-Value
62-PLR
63-PLRI
64-POP
65-PPV
66-PPV Macro
67-PPV Micro
68-PRE
69-Phi-Squared
70-RACC
71-RACCU
72-RCI
73-RR
74-Reference Entropy
75-Response Entropy
76-SOA1(Landis & Koch)
77-SOA2(Fleiss)
78-SOA3(Altman)
79-SOA4(Cicchetti)
80-Scott PI
81-Standard Error
82-TN
83-TNR
84-TON
85-TOP
86-TP
87-TPR
88-TPR Macro
89-TPR Micro
90-Y
91-Zero-one Loss
92-dInd
93-sInd
>>> online_help("J")
...
>>> online_help(4)
...
>>> y_actu = [2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]
>>> y_pred = [0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]
>>> cm = ConfusionMatrix(y_actu, y_pred, sample_weight=[2, 2, 2, 2, 3, 1, 1, 2, 2, 1, 1, 2])
>>> print(cm)
Predict 0 1 2
Actual
0 6 0 0
<BLANKLINE>
1 0 1 2
<BLANKLINE>
2 4 2 6
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.41134,0.82675)
AUNP 0.7
AUNU 0.70556
Bennett S 0.42857
CBA 0.47778
Chi-Squared 10.44167
Chi-Squared DF 4
Conditional Entropy 0.96498
Cramer V 0.49861
Cross Entropy 1.50249
Gwet AC1 0.45277
Hamming Loss 0.38095
Joint Entropy 2.34377
KL Divergence 0.1237
Kappa 0.3913
Kappa 95% CI (0.05943,0.72318)
Kappa No Prevalence 0.2381
Kappa Standard Error 0.16932
Kappa Unbiased 0.37313
Lambda A 0.22222
Lambda B 0.36364
Mutual Information 0.47618
NIR 0.57143
Overall ACC 0.61905
Overall CEN 0.43947
Overall J (1.22857,0.40952)
Overall MCC 0.41558
Overall MCEN 0.50059
Overall RACC 0.37415
Overall RACCU 0.39229
P-Value 0.41709
PPV Macro 0.56111
PPV Micro 0.61905
Phi-Squared 0.49722
RCI 0.34536
RR 7.0
Reference Entropy 1.37878
Response Entropy 1.44117
SOA1(Landis & Koch) Fair
SOA2(Fleiss) Poor
SOA3(Altman) Fair
SOA4(Cicchetti) Poor
Scott PI 0.37313
Standard Error 0.10597
TPR Macro 0.61111
TPR Micro 0.61905
Zero-one Loss 8
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 0 1 2
ACC(Accuracy) 0.80952 0.80952 0.61905
AUC(Area under the roc curve) 0.86667 0.61111 0.63889
AUCI(Auc value interpretation) Very Good Fair Fair
BM(Informedness or bookmaker informedness) 0.73333 0.22222 0.27778
CEN(Confusion entropy) 0.25 0.52832 0.56439
DOR(Diagnostic odds ratio) None 4.0 3.5
DP(Discriminant power) None 0.33193 0.29996
DPI(Discriminant power interpretation) None Poor Poor
ERR(Error rate) 0.19048 0.19048 0.38095
F0.5(F0.5 score) 0.65217 0.33333 0.68182
F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.33333 0.6
F2(F2 score) 0.88235 0.33333 0.53571
FDR(False discovery rate) 0.4 0.66667 0.25
FN(False negative/miss/type 2 error) 0 2 6
FNR(Miss rate or false negative rate) 0.0 0.66667 0.5
FOR(False omission rate) 0.0 0.11111 0.46154
FP(False positive/type 1 error/false alarm) 4 2 2
FPR(Fall-out or false positive rate) 0.26667 0.11111 0.22222
G(G-measure geometric mean of precision and sensitivity) 0.7746 0.33333 0.61237
GI(Gini index) 0.73333 0.22222 0.27778
IS(Information score) 1.07039 1.22239 0.39232
J(Jaccard index) 0.6 0.2 0.42857
LS(Lift score) 2.1 2.33333 1.3125
MCC(Matthews correlation coefficient) 0.66332 0.22222 0.28307
MCEN(Modified confusion entropy) 0.26439 0.52877 0.65924
MK(Markedness) 0.6 0.22222 0.28846
N(Condition negative) 15 18 9
NLR(Negative likelihood ratio) 0.0 0.75 0.64286
NPV(Negative predictive value) 1.0 0.88889 0.53846
P(Condition positive or support) 6 3 12
PLR(Positive likelihood ratio) 3.75 3.0 2.25
PLRI(Positive likelihood ratio interpretation) Poor Poor Poor
POP(Population) 21 21 21
PPV(Precision or positive predictive value) 0.6 0.33333 0.75
PRE(Prevalence) 0.28571 0.14286 0.57143
RACC(Random accuracy) 0.13605 0.02041 0.21769
RACCU(Random accuracy unbiased) 0.14512 0.02041 0.22676
TN(True negative/correct rejection) 11 16 7
TNR(Specificity or true negative rate) 0.73333 0.88889 0.77778
TON(Test outcome negative) 11 18 13
TOP(Test outcome positive) 10 3 8
TP(True positive/hit) 6 1 6
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5
Y(Youden index) 0.73333 0.22222 0.27778
dInd(Distance index) 0.26667 0.67586 0.54716
sInd(Similarity index) 0.81144 0.52209 0.6131
<BLANKLINE>
>>> save_obj=cm.save_obj("test3",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file_3=ConfusionMatrix(file=open("test3.obj","r"))
>>> cm_file_3.print_matrix()
Predict 0 1 2
Actual
0 6 0 0
1 0 1 2
2 4 2 6
<BLANKLINE>
>>> cm_file_3.stat()
Overall Statistics :
<BLANKLINE>
95% CI (0.41134,0.82675)
AUNP 0.7
AUNU 0.70556
Bennett S 0.42857
CBA 0.47778
Chi-Squared 10.44167
Chi-Squared DF 4
Conditional Entropy 0.96498
Cramer V 0.49861
Cross Entropy 1.50249
Gwet AC1 0.45277
Hamming Loss 0.38095
Joint Entropy 2.34377
KL Divergence 0.1237
Kappa 0.3913
Kappa 95% CI (0.05943,0.72318)
Kappa No Prevalence 0.2381
Kappa Standard Error 0.16932
Kappa Unbiased 0.37313
Lambda A 0.22222
Lambda B 0.36364
Mutual Information 0.47618
NIR 0.57143
Overall ACC 0.61905
Overall CEN 0.43947
Overall J (1.22857,0.40952)
Overall MCC 0.41558
Overall MCEN 0.50059
Overall RACC 0.37415
Overall RACCU 0.39229
P-Value 0.41709
PPV Macro 0.56111
PPV Micro 0.61905
Phi-Squared 0.49722
RCI 0.34536
RR 7.0
Reference Entropy 1.37878
Response Entropy 1.44117
SOA1(Landis & Koch) Fair
SOA2(Fleiss) Poor
SOA3(Altman) Fair
SOA4(Cicchetti) Poor
Scott PI 0.37313
Standard Error 0.10597
TPR Macro 0.61111
TPR Micro 0.61905
Zero-one Loss 8
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 0 1 2
ACC(Accuracy) 0.80952 0.80952 0.61905
AUC(Area under the roc curve) 0.86667 0.61111 0.63889
AUCI(Auc value interpretation) Very Good Fair Fair
BM(Informedness or bookmaker informedness) 0.73333 0.22222 0.27778
CEN(Confusion entropy) 0.25 0.52832 0.56439
DOR(Diagnostic odds ratio) None 4.0 3.5
DP(Discriminant power) None 0.33193 0.29996
DPI(Discriminant power interpretation) None Poor Poor
ERR(Error rate) 0.19048 0.19048 0.38095
F0.5(F0.5 score) 0.65217 0.33333 0.68182
F1(F1 score - harmonic mean of precision and sensitivity) 0.75 0.33333 0.6
F2(F2 score) 0.88235 0.33333 0.53571
FDR(False discovery rate) 0.4 0.66667 0.25
FN(False negative/miss/type 2 error) 0 2 6
FNR(Miss rate or false negative rate) 0.0 0.66667 0.5
FOR(False omission rate) 0.0 0.11111 0.46154
FP(False positive/type 1 error/false alarm) 4 2 2
FPR(Fall-out or false positive rate) 0.26667 0.11111 0.22222
G(G-measure geometric mean of precision and sensitivity) 0.7746 0.33333 0.61237
GI(Gini index) 0.73333 0.22222 0.27778
IS(Information score) 1.07039 1.22239 0.39232
J(Jaccard index) 0.6 0.2 0.42857
LS(Lift score) 2.1 2.33333 1.3125
MCC(Matthews correlation coefficient) 0.66332 0.22222 0.28307
MCEN(Modified confusion entropy) 0.26439 0.52877 0.65924
MK(Markedness) 0.6 0.22222 0.28846
N(Condition negative) 15 18 9
NLR(Negative likelihood ratio) 0.0 0.75 0.64286
NPV(Negative predictive value) 1.0 0.88889 0.53846
P(Condition positive or support) 6 3 12
PLR(Positive likelihood ratio) 3.75 3.0 2.25
PLRI(Positive likelihood ratio interpretation) Poor Poor Poor
POP(Population) 21 21 21
PPV(Precision or positive predictive value) 0.6 0.33333 0.75
PRE(Prevalence) 0.28571 0.14286 0.57143
RACC(Random accuracy) 0.13605 0.02041 0.21769
RACCU(Random accuracy unbiased) 0.14512 0.02041 0.22676
TN(True negative/correct rejection) 11 16 7
TNR(Specificity or true negative rate) 0.73333 0.88889 0.77778
TON(Test outcome negative) 11 18 13
TOP(Test outcome positive) 10 3 8
TP(True positive/hit) 6 1 6
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 0.33333 0.5
Y(Youden index) 0.73333 0.22222 0.27778
dInd(Distance index) 0.26667 0.67586 0.54716
sInd(Similarity index) 0.81144 0.52209 0.6131
>>> NIR_calc({'Class2': 804, 'Class1': 196},1000) # Verified Case
0.804
>>> cm = ConfusionMatrix(matrix={0:{0:3,1:1},1:{0:4,1:2}}) # Verified Case
>>> cm.LS[1]
1.1111111111111112
>>> cm.LS[0]
1.0714285714285714
>>> cm = ConfusionMatrix(matrix={"Class1":{"Class1":183,"Class2":13},"Class2":{"Class1":141,"Class2":663}}) # Verified Case
>>> cm.PValue
0.000342386296143693
>>> cm = ConfusionMatrix(matrix={"Class1":{"Class1":4,"Class2":2},"Class2":{"Class1":2,"Class2":4}}) # Verified Case
>>> cm.Overall_CEN
0.861654166907052
>>> cm.Overall_MCEN
0.6666666666666666
>>> cm.IS["Class1"]
0.4150374992788437
>>> cm.IS["Class2"]
0.4150374992788437
>>> cm = ConfusionMatrix(matrix={1:{1:5,2:0,3:0},2:{1:0,2:10,3:0},3:{1:0,2:300,3:0}}) # Verified Case
>>> cm.Overall_CEN
0.022168905807495587
>>> cm.Overall_MCC
0.3012440235352457
>>> cm.CBA
0.3440860215053763
>>> cm = ConfusionMatrix(matrix={1:{1:1,2:3,3:0,4:0},2:{1:9,2:1,3:0,4:0},3:{1:0,2:0,3:100,4:0},4:{1:0,2:0,3:0,4:200}}) # Verified Case
>>> cm.RCI
0.9785616782831341
>>> cm = ConfusionMatrix(matrix={1:{1:1,2:0,3:3},2:{1:0,2:100,3:0},3:{1:0,2:0,3:200}}) # Verified Case
>>> cm.RCI
0.9264007150415143
>>> cm = ConfusionMatrix(matrix={1:{1:5,2:0,3:0},2:{1:0,2:10,3:0},3:{1:0,2:300,3:0}})
>>> cm.RCI
0.3675708571923818
>>> cm = ConfusionMatrix(matrix={1:{1:12806,2:26332},2:{1:5484,2:299777}},transpose=True) # Verified Case
>>> cm.AUC[1]
0.8097090079101759
>>> cm.GI[1]
0.6194180158203517
>>> cm.Overall_ACC
0.9076187793808925
>>> cm.DP[1]
0.7854399677022138
>>> cm.Y[1]
0.6194180158203517
>>> cm.BM[1]
0.6194180158203517
>>> cm = ConfusionMatrix(matrix={1:{1:13182,2:30516},2:{1:5108,2:295593}},transpose=True) # Verified Case
>>> cm.AUC[1]
0.8135728157964055
>>> cm.GI[1]
0.627145631592811
>>> cm.Overall_ACC
0.896561836706843
>>> cm.DP[1]
0.770700985610517
>>> cm.Y[1]
0.627145631592811
>>> cm.BM[1]
0.627145631592811
>>> save_obj = cm.save_obj("test4",address=False)
>>> save_obj=={'Status': True, 'Message': None}
True
>>> cm_file=ConfusionMatrix(file=open("test4.obj","r"))
>>> cm_file.DP[1]
0.770700985610517
>>> cm_file.Y[1]
0.627145631592811
>>> cm_file.BM[1]
0.627145631592811
>>> cm_file.transpose
True
>>> json.dump({"Actual-Vector": None, "Digit": 5, "Predict-Vector": None, "Matrix": {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}, "Transpose": True,"Sample-Weight": None},open("test5.obj","w"))
>>> cm_file=ConfusionMatrix(file=open("test5.obj","r"))
>>> cm_file.transpose
True
>>> cm_file.matrix == {"0": {"0": 3, "1": 0, "2": 2}, "1": {"0": 0, "1": 1, "2": 1}, "2": {"0": 0, "1": 2, "2": 3}}
True
>>> cm = ConfusionMatrix([1,2,3,4],[1,2,3,"4"])
>>> cm
pycm.ConfusionMatrix(classes: ['1', '2', '3', '4'])
>>> os.remove("test.csv")
>>> os.remove("test.obj")
>>> os.remove("test.html")
>>> os.remove("test_filtered.html")
>>> os.remove("test_filtered.csv")
>>> os.remove("test_filtered.pycm")
>>> os.remove("test_filtered2.html")
>>> os.remove("test_filtered3.html")
>>> os.remove("test_filtered4.html")
>>> os.remove("test_filtered5.html")
>>> os.remove("test_colored.html")
>>> os.remove("test_filtered2.csv")
>>> os.remove("test_filtered3.csv")
>>> os.remove("test_filtered4.csv")
>>> os.remove("test_filtered2.pycm")
>>> os.remove("test_filtered3.pycm")
>>> os.remove("test2.obj")
>>> os.remove("test3.obj")
>>> os.remove("test4.obj")
>>> os.remove("test5.obj")
>>> os.remove("test.pycm")
"""
|
#encoding:utf-8
subreddit = 'talesfromtechsupport'
t_channel = '@r_talesfromtechsupport'
def send_post(submission, r2t):
    """Forward *submission* through the reddit-to-telegram bridge.

    Delegates to the plain sender and returns its result
    (presumably a delivery status — TODO confirm against the caller).
    """
    sender = r2t.send_simple
    return sender(submission)
|
subreddit = 'talesfromtechsupport'
t_channel = '@r_talesfromtechsupport'
def send_post(submission, r2t):
    """Forward *submission* through the reddit-to-telegram bridge.

    Delegates to the plain sender and returns its result
    (presumably a delivery status — TODO confirm against the caller).
    """
    sender = r2t.send_simple
    return sender(submission)
|
def factors(n):
    """Return the positive divisors of n in increasing order.

    For n <= 0 the range is empty, so an empty list is returned
    (same behavior as the original loop).
    """
    # Comprehension replaces the quadratic `factorlist + [i]` rebuild.
    return [i for i in range(1, n + 1) if n % i == 0]
def isprime(n):
    """Return True if n is a prime number, False otherwise.

    Trial division up to sqrt(n): O(sqrt(n)) instead of building the
    full divisor list. Matches the original `factors(n) == [1, n]`
    check for every integer, including n < 2 (always False).
    """
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True
def sumprimes(l):
    """Return the sum of the prime numbers contained in the list l.

    Relies on the module-level isprime() predicate. An empty list (or a
    list with no primes) sums to 0.
    """
    # Generator expression replaces the index loop and avoids shadowing
    # the built-in `sum` with a local name.
    return sum(x for x in l if isprime(x))
|
def factors(n):
    """Return the positive divisors of n in increasing order.

    For n <= 0 the range is empty, so an empty list is returned
    (same behavior as the original loop).
    """
    # Comprehension replaces the quadratic `factorlist + [i]` rebuild.
    return [i for i in range(1, n + 1) if n % i == 0]
def isprime(n):
    """Return True if n is a prime number, False otherwise.

    Trial division up to sqrt(n): O(sqrt(n)) instead of building the
    full divisor list. Matches the original `factors(n) == [1, n]`
    check for every integer, including n < 2 (always False).
    """
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True
def sumprimes(l):
    """Return the sum of the prime numbers contained in the list l.

    Relies on the module-level isprime() predicate. An empty list (or a
    list with no primes) sums to 0.
    """
    # Generator expression replaces the index loop and avoids shadowing
    # the built-in `sum` with a local name.
    return sum(x for x in l if isprime(x))
|
def for_A():
    """Print a 7-row by 5-column capital letter 'A' drawn with '*'.

    Each cell is two characters wide ('* ' or '  '); one line is
    printed per row.
    """
    height, width = 7, 5
    for r in range(height):
        cells = []
        for c in range(width):
            on_side = (c == 0 or c == width - 1) and r > 0
            on_bar = (r == 0 or r == 4) and 0 < c < width - 1
            cells.append("* " if on_side or on_bar else "  ")
        print("".join(cells))
def while_A():
    """Print a 7-row by 5-column capital letter 'A' using while loops.

    Same output as for_A(); kept as a while-loop demonstration.
    """
    row = 0
    while row < 7:
        line = ""
        col = 0
        while col < 5:
            edge = col in (0, 4) and row > 0
            bar = row in (0, 4) and 0 < col < 4
            line += "* " if edge or bar else "  "
            col += 1
        print(line)
        row += 1
|
def for_a():
    """Print a 7-row by 5-column capital letter 'A' drawn with '*'.

    Each cell is two characters wide ('* ' or '  '); one line is
    printed per row.
    """
    height, width = 7, 5
    for r in range(height):
        cells = []
        for c in range(width):
            on_side = (c == 0 or c == width - 1) and r > 0
            on_bar = (r == 0 or r == 4) and 0 < c < width - 1
            cells.append('* ' if on_side or on_bar else '  ')
        print(''.join(cells))
def while_a():
    """Print a 7-row by 5-column capital letter 'A' using while loops.

    Same output as for_a(); kept as a while-loop demonstration.
    """
    row = 0
    while row < 7:
        line = ''
        col = 0
        while col < 5:
            edge = col in (0, 4) and row > 0
            bar = row in (0, 4) and 0 < col < 4
            line += '* ' if edge or bar else '  '
            col += 1
        print(line)
        row += 1
|
def bubble_sort(arr):
    """Sort *arr* in place with an early-exit bubble sort and return it.

    After pass ``i`` the last ``i`` elements are already in their final
    positions, so each pass scans a shorter prefix.  If a full pass
    performs no swap the list is sorted and the loop stops early.

    Note: the input list is mutated; the same list object is returned.
    """
    n = len(arr)
    for i in range(n):
        swapped = False
        # Bubble the largest remaining element toward the end.
        for j in range(n - i - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:  # idiomatic check (was `swapped == False`, PEP 8 E712)
            break
    return arr
# Testing the sort
""" arr = [62, 22, 25, 11, 21, 11, 85]
bubble_sort(arr)
print("Sorted array:")
for i in range(len(arr)):
print("%d" % arr[i], end=" ")
"""
|
def bubble_sort(arr):
    """Sort *arr* in place with an early-exit bubble sort and return it.

    After pass ``i`` the last ``i`` elements are already in their final
    positions, so each pass scans a shorter prefix.  If a full pass
    performs no swap the list is sorted and the loop stops early.

    Note: the input list is mutated; the same list object is returned.
    """
    n = len(arr)
    for i in range(n):
        swapped = False
        # Bubble the largest remaining element toward the end.
        for j in range(0, n - i - 1):
            if arr[j] > arr[j + 1]:
                (arr[j], arr[j + 1]) = (arr[j + 1], arr[j])
                swapped = True
        if not swapped:  # idiomatic check (was `swapped == False`, PEP 8 E712)
            break
    return arr
' arr = [62, 22, 25, 11, 21, 11, 85]\n\nbubble_sort(arr)\n\nprint("Sorted array:")\nfor i in range(len(arr)):\n print("%d" % arr[i], end=" ")\n '
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.